Diffstat (limited to 'contrib/llvm-project/clang/lib/AST')
138 files changed, 40792 insertions, 10408 deletions
diff --git a/contrib/llvm-project/clang/lib/AST/APValue.cpp b/contrib/llvm-project/clang/lib/AST/APValue.cpp index 9a9233bc1ea7..d8e33ff421c0 100644 --- a/contrib/llvm-project/clang/lib/AST/APValue.cpp +++ b/contrib/llvm-project/clang/lib/AST/APValue.cpp @@ -90,7 +90,7 @@ QualType APValue::LValueBase::getType() const { // For a materialized temporary, the type of the temporary we materialized // may not be the type of the expression. if (const MaterializeTemporaryExpr *MTE = - clang::dyn_cast<MaterializeTemporaryExpr>(Base)) { + llvm::dyn_cast<MaterializeTemporaryExpr>(Base)) { SmallVector<const Expr *, 2> CommaLHSs; SmallVector<SubobjectAdjustment, 2> Adjustments; const Expr *Temp = MTE->getSubExpr(); @@ -156,10 +156,10 @@ void APValue::LValuePathEntry::Profile(llvm::FoldingSetNodeID &ID) const { APValue::LValuePathSerializationHelper::LValuePathSerializationHelper( ArrayRef<LValuePathEntry> Path, QualType ElemTy) - : ElemTy((const void *)ElemTy.getTypePtrOrNull()), Path(Path) {} + : Ty((const void *)ElemTy.getTypePtrOrNull()), Path(Path) {} QualType APValue::LValuePathSerializationHelper::getType() { - return QualType::getFromOpaquePtr(ElemTy); + return QualType::getFromOpaquePtr(Ty); } namespace { @@ -390,11 +390,13 @@ APValue &APValue::operator=(const APValue &RHS) { } APValue &APValue::operator=(APValue &&RHS) { - if (Kind != None && Kind != Indeterminate) - DestroyDataAndMakeUninit(); - Kind = RHS.Kind; - Data = RHS.Data; - RHS.Kind = None; + if (this != &RHS) { + if (Kind != None && Kind != Indeterminate) + DestroyDataAndMakeUninit(); + Kind = RHS.Kind; + Data = RHS.Data; + RHS.Kind = None; + } return *this; } @@ -625,6 +627,69 @@ static double GetApproxValue(const llvm::APFloat &F) { return V.convertToDouble(); } +static bool TryPrintAsStringLiteral(raw_ostream &Out, + const PrintingPolicy &Policy, + const ArrayType *ATy, + ArrayRef<APValue> Inits) { + if (Inits.empty()) + return false; + + QualType Ty = ATy->getElementType(); + if (!Ty->isAnyCharacterType()) + return false; + + // Nothing we can do about a sequence that is not null-terminated + if (!Inits.back().isInt() || !Inits.back().getInt().isZero()) + return false; + + Inits = Inits.drop_back(); + + llvm::SmallString<40> Buf; + Buf.push_back('"'); + + // Better than printing a two-digit sequence of 10 integers. + constexpr size_t MaxN = 36; + StringRef Ellipsis; + if (Inits.size() > MaxN && !Policy.EntireContentsOfLargeArray) { + Ellipsis = "[...]"; + Inits = + Inits.take_front(std::min(MaxN - Ellipsis.size() / 2, Inits.size())); + } + + for (auto &Val : Inits) { + if (!Val.isInt()) + return false; + int64_t Char64 = Val.getInt().getExtValue(); + if (!isASCII(Char64)) + return false; // Bye bye, see you in integers. 
+ auto Ch = static_cast<unsigned char>(Char64); + // The diagnostic message is 'quoted' + StringRef Escaped = escapeCStyle<EscapeChar::SingleAndDouble>(Ch); + if (Escaped.empty()) { + if (!isPrintable(Ch)) + return false; + Buf.emplace_back(Ch); + } else { + Buf.append(Escaped); + } + } + + Buf.append(Ellipsis); + Buf.push_back('"'); + + if (Ty->isWideCharType()) + Out << 'L'; + else if (Ty->isChar8Type()) + Out << "u8"; + else if (Ty->isChar16Type()) + Out << 'u'; + else if (Ty->isChar32Type()) + Out << 'U'; + + Out << Buf; + return true; +} + void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx, QualType Ty) const { printPretty(Out, Ctx.getPrintingPolicy(), Ty, &Ctx); @@ -639,6 +704,9 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, return; } + if (const auto *AT = Ty->getAs<AtomicType>()) + Ty = AT->getValueType(); + switch (getKind()) { case APValue::None: Out << "<out of lifetime>"; @@ -700,7 +768,9 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, if (!hasLValuePath()) { // No lvalue path: just print the offset. CharUnits O = getLValueOffset(); - CharUnits S = Ctx ? Ctx->getTypeSizeInChars(InnerTy) : CharUnits::Zero(); + CharUnits S = Ctx ? Ctx->getTypeSizeInCharsIfKnown(InnerTy).value_or( + CharUnits::Zero()) + : CharUnits::Zero(); if (!O.isZero()) { if (IsReference) Out << "*("; @@ -774,6 +844,10 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, Out << *VD; ElemTy = VD->getType(); } + } else if (ElemTy->isAnyComplexType()) { + // The lvalue refers to a complex type + Out << (Path[I].getAsArrayIndex() == 0 ? ".real" : ".imag"); + ElemTy = ElemTy->castAs<ComplexType>()->getElementType(); } else { // The lvalue must refer to an array. Out << '[' << Path[I].getAsArrayIndex() << ']'; @@ -793,17 +867,23 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, } case APValue::Array: { const ArrayType *AT = Ty->castAsArrayTypeUnsafe(); + unsigned N = getArrayInitializedElts(); + if (N != 0 && TryPrintAsStringLiteral(Out, Policy, AT, + {&getArrayInitializedElt(0), N})) + return; QualType ElemTy = AT->getElementType(); Out << '{'; - if (unsigned N = getArrayInitializedElts()) { - getArrayInitializedElt(0).printPretty(Out, Policy, ElemTy, Ctx); - for (unsigned I = 1; I != N; ++I) { + unsigned I = 0; + switch (N) { + case 0: + for (; I != N; ++I) { Out << ", "; - if (I == 10) { - // Avoid printing out the entire contents of large arrays. - Out << "..."; - break; + if (I == 10 && !Policy.EntireContentsOfLargeArray) { + Out << "...}"; + return; } + [[fallthrough]]; + default: getArrayInitializedElt(I).printPretty(Out, Policy, ElemTy, Ctx); } } @@ -828,7 +908,8 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy, for (const auto *FI : RD->fields()) { if (!First) Out << ", "; - if (FI->isUnnamedBitfield()) continue; + if (FI->isUnnamedBitField()) + continue; getStructField(FI->getFieldIndex()). 
printPretty(Out, Policy, FI->getType(), Ctx); First = false; @@ -913,7 +994,7 @@ bool APValue::hasLValuePath() const { ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const { assert(isLValue() && hasLValuePath() && "Invalid accessor"); const LV &LVal = *((const LV *)(const char *)&Data); - return llvm::makeArrayRef(LVal.getPath(), LVal.PathLength); + return llvm::ArrayRef(LVal.getPath(), LVal.PathLength); } unsigned APValue::getLValueCallIndex() const { @@ -991,7 +1072,7 @@ ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const { assert(isMemberPointer() && "Invalid accessor"); const MemberPointerData &MPD = *((const MemberPointerData *)(const char *)&Data); - return llvm::makeArrayRef(MPD.getPath(), MPD.PathLength); + return llvm::ArrayRef(MPD.getPath(), MPD.PathLength); } void APValue::MakeLValue() { @@ -1038,7 +1119,7 @@ LinkageInfo LinkageComputer::getLVForValue(const APValue &V, auto MergeLV = [&](LinkageInfo MergeLV) { LV.merge(MergeLV); - return LV.getLinkage() == InternalLinkage; + return LV.getLinkage() == Linkage::Internal; }; auto Merge = [&](const APValue &V) { return MergeLV(getLVForValue(V, computation)); diff --git a/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp b/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp index 549088ad4a8a..d8efbe44dbec 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp @@ -1,9 +1,8 @@ //===--- ASTConcept.cpp - Concepts Related AST Data Structures --*- C++ -*-===// // -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// @@ -14,39 +13,49 @@ #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTContext.h" -#include "clang/AST/Decl.h" -#include "clang/AST/TemplateBase.h" +#include "clang/AST/PrettyPrinter.h" #include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/StringExtras.h" + using namespace clang; -ASTConstraintSatisfaction::ASTConstraintSatisfaction(const ASTContext &C, - const ConstraintSatisfaction &Satisfaction): - NumRecords{Satisfaction.Details.size()}, - IsSatisfied{Satisfaction.IsSatisfied} { - for (unsigned I = 0; I < NumRecords; ++I) { - auto &Detail = Satisfaction.Details[I]; - if (Detail.second.is<Expr *>()) - new (getTrailingObjects<UnsatisfiedConstraintRecord>() + I) - UnsatisfiedConstraintRecord{Detail.first, - UnsatisfiedConstraintRecord::second_type( - Detail.second.get<Expr *>())}; - else { - auto &SubstitutionDiagnostic = - *Detail.second.get<std::pair<SourceLocation, StringRef> *>(); - unsigned MessageSize = SubstitutionDiagnostic.second.size(); - char *Mem = new (C) char[MessageSize]; - memcpy(Mem, SubstitutionDiagnostic.second.data(), MessageSize); - auto *NewSubstDiag = new (C) std::pair<SourceLocation, StringRef>( - SubstitutionDiagnostic.first, StringRef(Mem, MessageSize)); - new (getTrailingObjects<UnsatisfiedConstraintRecord>() + I) - UnsatisfiedConstraintRecord{Detail.first, - UnsatisfiedConstraintRecord::second_type( - NewSubstDiag)}; - } +static void +CreateUnsatisfiedConstraintRecord(const ASTContext &C, + const UnsatisfiedConstraintRecord &Detail, + UnsatisfiedConstraintRecord *TrailingObject) { + if (Detail.is<Expr *>()) + new (TrailingObject) UnsatisfiedConstraintRecord(Detail.get<Expr *>()); + else { + auto &SubstitutionDiagnostic = + *Detail.get<std::pair<SourceLocation, StringRef> *>(); + StringRef Message = C.backupStr(SubstitutionDiagnostic.second); + auto *NewSubstDiag = new (C) std::pair<SourceLocation, StringRef>( + SubstitutionDiagnostic.first, Message); + new (TrailingObject) UnsatisfiedConstraintRecord(NewSubstDiag); } } +ASTConstraintSatisfaction::ASTConstraintSatisfaction( + const ASTContext &C, const ConstraintSatisfaction &Satisfaction) + : NumRecords{Satisfaction.Details.size()}, + IsSatisfied{Satisfaction.IsSatisfied}, ContainsErrors{ + Satisfaction.ContainsErrors} { + for (unsigned I = 0; I < NumRecords; ++I) + CreateUnsatisfiedConstraintRecord( + C, Satisfaction.Details[I], + getTrailingObjects<UnsatisfiedConstraintRecord>() + I); +} + +ASTConstraintSatisfaction::ASTConstraintSatisfaction( + const ASTContext &C, const ASTConstraintSatisfaction &Satisfaction) + : NumRecords{Satisfaction.NumRecords}, + IsSatisfied{Satisfaction.IsSatisfied}, + ContainsErrors{Satisfaction.ContainsErrors} { + for (unsigned I = 0; I < NumRecords; ++I) + CreateUnsatisfiedConstraintRecord( + C, *(Satisfaction.begin() + I), + getTrailingObjects<UnsatisfiedConstraintRecord>() + I); +} ASTConstraintSatisfaction * ASTConstraintSatisfaction::Create(const ASTContext &C, @@ -58,6 +67,14 @@ ASTConstraintSatisfaction::Create(const ASTContext &C, return new (Mem) ASTConstraintSatisfaction(C, Satisfaction); } +ASTConstraintSatisfaction *ASTConstraintSatisfaction::Rebuild( + const ASTContext &C, const ASTConstraintSatisfaction &Satisfaction) { + std::size_t size = + totalSizeToAlloc<UnsatisfiedConstraintRecord>(Satisfaction.NumRecords); + void *Mem = C.Allocate(size, alignof(ASTConstraintSatisfaction)); + 
return new (Mem) ASTConstraintSatisfaction(C, Satisfaction); +} + void ConstraintSatisfaction::Profile( llvm::FoldingSetNodeID &ID, const ASTContext &C, const NamedDecl *ConstraintOwner, ArrayRef<TemplateArgument> TemplateArgs) { @@ -66,3 +83,30 @@ void ConstraintSatisfaction::Profile( for (auto &Arg : TemplateArgs) Arg.Profile(ID, C); } + +ConceptReference * +ConceptReference::Create(const ASTContext &C, NestedNameSpecifierLoc NNS, + SourceLocation TemplateKWLoc, + DeclarationNameInfo ConceptNameInfo, + NamedDecl *FoundDecl, ConceptDecl *NamedConcept, + const ASTTemplateArgumentListInfo *ArgsAsWritten) { + return new (C) ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo, + FoundDecl, NamedConcept, ArgsAsWritten); +} + +void ConceptReference::print(llvm::raw_ostream &OS, + const PrintingPolicy &Policy) const { + if (NestedNameSpec) + NestedNameSpec.getNestedNameSpecifier()->print(OS, Policy); + ConceptName.printName(OS, Policy); + if (hasExplicitTemplateArgs()) { + OS << "<"; + llvm::ListSeparator Sep(", "); + // FIXME: Find corresponding parameter for argument + for (auto &ArgLoc : ArgsAsWritten->arguments()) { + OS << Sep; + ArgLoc.getArgument().print(Policy, OS, /*IncludeType*/ false); + } + OS << ">"; + } +} diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp index 0e163f3161a3..1064507f3461 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp @@ -41,6 +41,7 @@ #include "clang/AST/RawCommentList.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/Stmt.h" +#include "clang/AST/StmtOpenACC.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" @@ -58,6 +59,7 @@ #include "clang/Basic/Module.h" #include "clang/Basic/NoSanitizeList.h" #include "clang/Basic/ObjCRuntime.h" +#include "clang/Basic/ProfileList.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" @@ -71,22 +73,23 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/FoldingSet.h" -#include "llvm/ADT/None.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/PointerUnion.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" -#include "llvm/ADT/Triple.h" +#include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/Support/Capacity.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MD5.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/SipHash.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/TargetParser/AArch64TargetParser.h" +#include "llvm/TargetParser/Triple.h" #include <algorithm> #include <cassert> #include <cstddef> @@ -94,6 +97,7 @@ #include <cstdlib> #include <map> #include <memory> +#include <optional> #include <string> #include <tuple> #include <utility> @@ -101,13 +105,20 @@ using namespace clang; enum FloatingRank { - BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank + BFloat16Rank, + Float16Rank, + HalfRank, + FloatRank, + DoubleRank, + LongDoubleRank, + Float128Rank, + Ibm128Rank }; -/// \returns location that is relevant when searching for Doc comments related -/// to \p D. 
-static SourceLocation getDeclLocForCommentSearch(const Decl *D, - SourceManager &SourceMgr) { +/// \returns The locations that are relevant when searching for Doc comments +/// related to \p D. +static SmallVector<SourceLocation, 2> +getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) { assert(D); // User can not attach documentation to implicit declarations. @@ -159,42 +170,48 @@ static SourceLocation getDeclLocForCommentSearch(const Decl *D, isa<TemplateTemplateParmDecl>(D)) return {}; + SmallVector<SourceLocation, 2> Locations; // Find declaration location. // For Objective-C declarations we generally don't expect to have multiple // declarators, thus use declaration starting location as the "declaration // location". // For all other declarations multiple declarators are used quite frequently, // so we use the location of the identifier as the "declaration location". + SourceLocation BaseLocation; if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) || - isa<ObjCPropertyDecl>(D) || - isa<RedeclarableTemplateDecl>(D) || + isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) || isa<ClassTemplateSpecializationDecl>(D) || // Allow association with Y across {} in `typedef struct X {} Y`. isa<TypedefDecl>(D)) - return D->getBeginLoc(); - else { - const SourceLocation DeclLoc = D->getLocation(); - if (DeclLoc.isMacroID()) { - if (isa<TypedefDecl>(D)) { - // If location of the typedef name is in a macro, it is because being - // declared via a macro. Try using declaration's starting location as - // the "declaration location". - return D->getBeginLoc(); - } else if (const auto *TD = dyn_cast<TagDecl>(D)) { - // If location of the tag decl is inside a macro, but the spelling of - // the tag name comes from a macro argument, it looks like a special - // macro like NS_ENUM is being used to define the tag decl. In that - // case, adjust the source location to the expansion loc so that we can - // attach the comment to the tag decl. - if (SourceMgr.isMacroArgExpansion(DeclLoc) && - TD->isCompleteDefinition()) - return SourceMgr.getExpansionLoc(DeclLoc); - } + BaseLocation = D->getBeginLoc(); + else + BaseLocation = D->getLocation(); + + if (!D->getLocation().isMacroID()) { + Locations.emplace_back(BaseLocation); + } else { + const auto *DeclCtx = D->getDeclContext(); + + // When encountering definitions generated from a macro (that are not + // contained by another declaration in the macro) we need to try and find + // the comment at the location of the expansion but if there is no comment + // there we should retry to see if there is a comment inside the macro as + // well. To this end we return first BaseLocation to first look at the + // expansion site, the second value is the spelling location of the + // beginning of the declaration defined inside the macro. + if (!(DeclCtx && + Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) { + Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation)); } - return DeclLoc; + + // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that + // we don't refer to the macro argument location at the expansion site (this + // can happen if the name's spelling is provided via macro argument), and + // always to the declaration itself. 
+ Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc())); } - return {}; + return Locations; } RawComment *ASTContext::getRawCommentForDeclNoCacheImpl( @@ -269,34 +286,43 @@ RawComment *ASTContext::getRawCommentForDeclNoCacheImpl( // There should be no other declarations or preprocessor directives between // comment and declaration. - if (Text.find_first_of(";{}#@") != StringRef::npos) + if (Text.find_last_of(";{}#@") != StringRef::npos) return nullptr; return CommentBeforeDecl; } RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { - const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); + const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr); - // If the declaration doesn't map directly to a location in a file, we - // can't find the comment. - if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) - return nullptr; + for (const auto DeclLoc : DeclLocs) { + // If the declaration doesn't map directly to a location in a file, we + // can't find the comment. + if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) + continue; - if (ExternalSource && !CommentsLoaded) { - ExternalSource->ReadComments(); - CommentsLoaded = true; - } + if (ExternalSource && !CommentsLoaded) { + ExternalSource->ReadComments(); + CommentsLoaded = true; + } - if (Comments.empty()) - return nullptr; + if (Comments.empty()) + continue; - const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; - const auto CommentsInThisFile = Comments.getCommentsInFile(File); - if (!CommentsInThisFile || CommentsInThisFile->empty()) - return nullptr; + const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; + if (!File.isValid()) + continue; + + const auto CommentsInThisFile = Comments.getCommentsInFile(File); + if (!CommentsInThisFile || CommentsInThisFile->empty()) + continue; - return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile); + if (RawComment *Comment = + getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) + return Comment; + } + + return nullptr; } void ASTContext::addComment(const RawComment &RC) { @@ -416,10 +442,7 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl( // Any redeclarations of D that we haven't checked for comments yet? // We can't use DenseMap::iterator directly since it'd get invalid. auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { - auto LookupRes = CommentlessRedeclChains.find(CanonicalD); - if (LookupRes != CommentlessRedeclChains.end()) - return LookupRes->second; - return nullptr; + return CommentlessRedeclChains.lookup(CanonicalD); }(); for (const auto Redecl : D->redecls()) { @@ -478,7 +501,11 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, return; FileID File; - for (Decl *D : Decls) { + for (const Decl *D : Decls) { + if (D->isInvalidDecl()) + continue; + + D = &adjustDeclToTemplate(*D); SourceLocation Loc = D->getLocation(); if (Loc.isValid()) { // See if there are any new comments that are not attached to a decl. @@ -503,7 +530,6 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, // declaration, but also comments that *follow* the declaration -- thanks to // the lookahead in the lexer: we've consumed the semicolon and looked // ahead through comments. 
- for (const Decl *D : Decls) { assert(D); if (D->isInvalidDecl()) @@ -511,19 +537,22 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, D = &adjustDeclToTemplate(*D); - const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); - - if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) - continue; - if (DeclRawComments.count(D) > 0) continue; - if (RawComment *const DocComment = - getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) { - cacheRawCommentForDecl(*D, *DocComment); - comments::FullComment *FC = DocComment->parse(*this, PP, D); - ParsedComments[D->getCanonicalDecl()] = FC; + const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr); + + for (const auto DeclLoc : DeclLocs) { + if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) + continue; + + if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl( + D, DeclLoc, *CommentsInThisFile)) { + cacheRawCommentForDecl(*D, *DocComment); + comments::FullComment *FC = DocComment->parse(*this, PP, D); + ParsedComments[D->getCanonicalDecl()] = FC; + break; + } } } } @@ -671,11 +700,6 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { ID.AddInteger(0); ID.AddBoolean(TTP->isParameterPack()); - const TypeConstraint *TC = TTP->getTypeConstraint(); - ID.AddBoolean(TC != nullptr); - if (TC) - TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, - /*Canonical=*/true); if (TTP->isExpandedParameterPack()) { ID.AddBoolean(true); ID.AddInteger(TTP->getNumExpansionParameters()); @@ -687,7 +711,8 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { ID.AddInteger(1); ID.AddBoolean(NTTP->isParameterPack()); - ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr()); + ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType())) + .getAsOpaquePtr()); if (NTTP->isExpandedParameterPack()) { ID.AddBoolean(true); ID.AddInteger(NTTP->getNumExpansionTypes()); @@ -704,61 +729,6 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, ID.AddInteger(2); Profile(ID, C, TTP); } - Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause(); - ID.AddBoolean(RequiresClause != nullptr); - if (RequiresClause) - RequiresClause->Profile(ID, C, /*Canonical=*/true); -} - -static Expr * -canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC, - QualType ConstrainedType) { - // This is a bit ugly - we need to form a new immediately-declared - // constraint that references the new parameter; this would ideally - // require semantic analysis (e.g. template<C T> struct S {}; - the - // converted arguments of C<T> could be an argument pack if C is - // declared as template<typename... T> concept C = ...). - // We don't have semantic analysis here so we dig deep into the - // ready-made constraint expr and change the thing manually. - ConceptSpecializationExpr *CSE; - if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC)) - CSE = cast<ConceptSpecializationExpr>(Fold->getLHS()); - else - CSE = cast<ConceptSpecializationExpr>(IDC); - ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments(); - SmallVector<TemplateArgument, 3> NewConverted; - NewConverted.reserve(OldConverted.size()); - if (OldConverted.front().getKind() == TemplateArgument::Pack) { - // The case: - // template<typename... 
T> concept C = true; - // template<C<int> T> struct S; -> constraint is C<{T, int}> - NewConverted.push_back(ConstrainedType); - for (auto &Arg : OldConverted.front().pack_elements().drop_front(1)) - NewConverted.push_back(Arg); - TemplateArgument NewPack(NewConverted); - - NewConverted.clear(); - NewConverted.push_back(NewPack); - assert(OldConverted.size() == 1 && - "Template parameter pack should be the last parameter"); - } else { - assert(OldConverted.front().getKind() == TemplateArgument::Type && - "Unexpected first argument kind for immediately-declared " - "constraint"); - NewConverted.push_back(ConstrainedType); - for (auto &Arg : OldConverted.drop_front(1)) - NewConverted.push_back(Arg); - } - Expr *NewIDC = ConceptSpecializationExpr::Create( - C, CSE->getNamedConcept(), NewConverted, nullptr, - CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack()); - - if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC)) - NewIDC = new (C) CXXFoldExpr( - OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC, - BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr, - SourceLocation(), /*NumExpansions=*/None); - return NewIDC; } TemplateTemplateParmDecl * @@ -780,35 +750,19 @@ ASTContext::getCanonicalTemplateTemplateParmDecl( for (TemplateParameterList::const_iterator P = Params->begin(), PEnd = Params->end(); P != PEnd; ++P) { + // Note that, per C++20 [temp.over.link]/6, when determining whether + // template-parameters are equivalent, constraints are ignored. if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { - TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this, - getTranslationUnitDecl(), SourceLocation(), SourceLocation(), + TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create( + *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(), TTP->getDepth(), TTP->getIndex(), nullptr, false, - TTP->isParameterPack(), TTP->hasTypeConstraint(), - TTP->isExpandedParameterPack() ? - llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None); - if (const auto *TC = TTP->getTypeConstraint()) { - QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0); - Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint( - *this, TC->getImmediatelyDeclaredConstraint(), - ParamAsArgument); - TemplateArgumentListInfo CanonArgsAsWritten; - if (auto *Args = TC->getTemplateArgsAsWritten()) - for (const auto &ArgLoc : Args->arguments()) - CanonArgsAsWritten.addArgument( - TemplateArgumentLoc(ArgLoc.getArgument(), - TemplateArgumentLocInfo())); - NewTTP->setTypeConstraint( - NestedNameSpecifierLoc(), - DeclarationNameInfo(TC->getNamedConcept()->getDeclName(), - SourceLocation()), /*FoundDecl=*/nullptr, - // Actually canonicalizing a TemplateArgumentLoc is difficult so we - // simply omit the ArgsAsWritten - TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC); - } + TTP->isParameterPack(), /*HasTypeConstraint=*/false, + TTP->isExpandedParameterPack() + ? 
std::optional<unsigned>(TTP->getNumExpansionParameters()) + : std::nullopt); CanonParams.push_back(NewTTP); } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { - QualType T = getCanonicalType(NTTP->getType()); + QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType())); TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); NonTypeTemplateParmDecl *Param; if (NTTP->isExpandedParameterPack()) { @@ -839,35 +793,18 @@ ASTContext::getCanonicalTemplateTemplateParmDecl( NTTP->isParameterPack(), TInfo); } - if (AutoType *AT = T->getContainedAutoType()) { - if (AT->isConstrained()) { - Param->setPlaceholderTypeConstraint( - canonicalizeImmediatelyDeclaredConstraint( - *this, NTTP->getPlaceholderTypeConstraint(), T)); - } - } CanonParams.push_back(Param); - } else CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( cast<TemplateTemplateParmDecl>(*P))); } - Expr *CanonRequiresClause = nullptr; - if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause()) - CanonRequiresClause = RequiresClause; - - TemplateTemplateParmDecl *CanonTTP - = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(), - SourceLocation(), TTP->getDepth(), - TTP->getPosition(), - TTP->isParameterPack(), - nullptr, - TemplateParameterList::Create(*this, SourceLocation(), - SourceLocation(), - CanonParams, - SourceLocation(), - CanonRequiresClause)); + TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create( + *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(), + TTP->getPosition(), TTP->isParameterPack(), nullptr, /*Typename=*/false, + TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(), + CanonParams, SourceLocation(), + /*RequiresClause=*/nullptr)); // Get the new insert position for the node we care about. Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); @@ -882,7 +819,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl( TargetCXXABI::Kind ASTContext::getCXXABIKind() const { auto Kind = getTargetInfo().getCXXABI().getKind(); - return getLangOpts().CXXABI.getValueOr(Kind); + return getLangOpts().CXXABI.value_or(Kind); } CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { @@ -919,38 +856,6 @@ ParentMapContext &ASTContext::getParentMapContext() { return *ParentMapCtx.get(); } -static const LangASMap *getAddressSpaceMap(const TargetInfo &T, - const LangOptions &LOpts) { - if (LOpts.FakeAddressSpaceMap) { - // The fake address space map must have a distinct entry for each - // language-specific address space. 
- static const unsigned FakeAddrSpaceMap[] = { - 0, // Default - 1, // opencl_global - 3, // opencl_local - 2, // opencl_constant - 0, // opencl_private - 4, // opencl_generic - 5, // opencl_global_device - 6, // opencl_global_host - 7, // cuda_device - 8, // cuda_constant - 9, // cuda_shared - 1, // sycl_global - 5, // sycl_global_device - 6, // sycl_global_host - 3, // sycl_local - 0, // sycl_private - 10, // ptr32_sptr - 11, // ptr32_uptr - 12 // ptr64 - }; - return &FakeAddrSpaceMap; - } else { - return &T.getAddressSpaceMap(); - } -} - static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, const LangOptions &LangOpts) { switch (LangOpts.getAddressSpaceMapMangling()) { @@ -967,11 +872,17 @@ static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents, SelectorTable &sels, Builtin::Context &builtins, TranslationUnitKind TUKind) - : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()), + : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize), + DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()), + DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()), + DependentSizedMatrixTypes(this_()), + FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize), + DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()), TemplateSpecializationTypes(this_()), DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()), - SubstTemplateTemplateParmPacks(this_()), - CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts), + DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()), + ArrayParameterTypes(this_()), CanonTemplateTemplateParms(this_()), + SourceMgr(SM), LangOpts(LOpts), NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)), XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles, LangOpts.XRayNeverInstrumentFiles, @@ -984,7 +895,7 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, addTranslationUnitDecl(); } -ASTContext::~ASTContext() { +void ASTContext::cleanup() { // Release the DenseMaps associated with DeclContext objects. // FIXME: Is this the ideal solution? ReleaseDeclContextMaps(); @@ -992,6 +903,7 @@ ASTContext::~ASTContext() { // Call all of the deallocation functions on all of their targets. for (auto &Pair : Deallocations) (Pair.first)(Pair.second); + Deallocations.clear(); // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed // because they can contain DenseMaps. @@ -1001,6 +913,7 @@ ASTContext::~ASTContext() { // Increment in loop to prevent using deallocated memory. 
if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) R->Destroy(*this); + ObjCLayouts.clear(); for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { @@ -1008,16 +921,21 @@ ASTContext::~ASTContext() { if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) R->Destroy(*this); } + ASTRecordLayouts.clear(); for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(), AEnd = DeclAttrs.end(); A != AEnd; ++A) A->second->~AttrVec(); + DeclAttrs.clear(); for (const auto &Value : ModuleInitializers) Value.second->~PerModuleInitializers(); + ModuleInitializers.clear(); } +ASTContext::~ASTContext() { cleanup(); } + void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) { TraversalScope = TopLevelDecls; getParentMapContext().clear(); @@ -1112,7 +1030,7 @@ void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { for (Module *&M : Merged) if (!Found.insert(M).second) M = nullptr; - Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end()); + llvm::erase(Merged, nullptr); } ArrayRef<Module *> @@ -1120,7 +1038,7 @@ ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { auto MergedIt = MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl())); if (MergedIt == MergedDefModules.end()) - return None; + return std::nullopt; return MergedIt->second; } @@ -1167,7 +1085,8 @@ void ASTContext::addModuleInitializer(Module *M, Decl *D) { Inits->Initializers.push_back(D); } -void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) { +void ASTContext::addLazyModuleInitializers(Module *M, + ArrayRef<GlobalDeclID> IDs) { auto *&Inits = ModuleInitializers[M]; if (!Inits) Inits = new (*this) PerModuleInitializers; @@ -1178,13 +1097,45 @@ void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) { ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) { auto It = ModuleInitializers.find(M); if (It == ModuleInitializers.end()) - return None; + return std::nullopt; auto *Inits = It->second; Inits->resolve(*this); return Inits->Initializers; } +void ASTContext::setCurrentNamedModule(Module *M) { + assert(M->isNamedModule()); + assert(!CurrentCXXNamedModule && + "We should set named module for ASTContext for only once"); + CurrentCXXNamedModule = M; +} + +bool ASTContext::isInSameModule(const Module *M1, const Module *M2) { + if (!M1 != !M2) + return false; + + /// Get the representative module for M. The representative module is the + /// first module unit for a specific primary module name. So that the module + /// units have the same representative module belongs to the same module. + /// + /// The process is helpful to reduce the expensive string operations. 
+ auto GetRepresentativeModule = [this](const Module *M) { + auto Iter = SameModuleLookupSet.find(M); + if (Iter != SameModuleLookupSet.end()) + return Iter->second; + + const Module *RepresentativeModule = + PrimaryModuleNameMap.try_emplace(M->getPrimaryModuleInterfaceName(), M) + .first->second; + SameModuleLookupSet[M] = RepresentativeModule; + return RepresentativeModule; + }; + + assert(M1 && "Shouldn't call `isInSameModule` if both M1 and M2 are none."); + return GetRepresentativeModule(M1) == GetRepresentativeModule(M2); +} + ExternCContextDecl *ASTContext::getExternCContextDecl() const { if (!ExternCContext) ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); @@ -1258,7 +1209,7 @@ TypedefDecl *ASTContext::getUInt128Decl() const { } void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) { - auto *Ty = new (*this, TypeAlignment) BuiltinType(K); + auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K); R = CanQualType::CreateUnsafe(QualType(Ty, 0)); Types.push_back(Ty); } @@ -1273,7 +1224,6 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, this->AuxTarget = AuxTarget; ABI.reset(createCXXABI(Target)); - AddrSpaceMap = getAddressSpaceMap(Target, LangOpts); AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts); // C99 6.2.5p19. @@ -1308,6 +1258,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, // GNU extension, __float128 for IEEE quadruple precision InitBuiltinType(Float128Ty, BuiltinType::Float128); + // __ibm128 for IBM extended precision + InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128); + // C11 extension ISO/IEC TS 18661-3 InitBuiltinType(Float16Ty, BuiltinType::Float16); @@ -1381,6 +1334,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, // Placeholder type for bound members. InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); + // Placeholder type for unresolved templates. + InitBuiltinType(UnresolvedTemplateTy, BuiltinType::UnresolvedTemplate); + // Placeholder type for pseudo-objects. InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); @@ -1395,19 +1351,18 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, // Placeholder type for OMP array sections. if (LangOpts.OpenMP) { - InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); + InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection); InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); } + // Placeholder type for OpenACC array sections, if we are ALSO in OMP mode, + // don't bother, as we're just using the same type as OMP. + if (LangOpts.OpenACC && !LangOpts.OpenMP) { + InitBuiltinType(ArraySectionTy, BuiltinType::ArraySection); + } if (LangOpts.MatrixTypes) InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); - // C99 6.2.5p11. - FloatComplexTy = getComplexType(FloatTy); - DoubleComplexTy = getComplexType(DoubleTy); - LongDoubleComplexTy = getComplexType(LongDoubleTy); - Float128ComplexTy = getComplexType(Float128Ty); - // Builtin types for 'id', 'Class', and 'SEL'. 
InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); @@ -1429,19 +1384,17 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, #include "clang/Basic/OpenCLExtensionTypes.def" } - if (Target.hasAArch64SVETypes()) { + if (Target.hasAArch64SVETypes() || + (AuxTarget && AuxTarget->hasAArch64SVETypes())) { #define SVE_TYPE(Name, Id, SingletonId) \ InitBuiltinType(SingletonId, BuiltinType::Id); #include "clang/Basic/AArch64SVEACLETypes.def" } - if (Target.getTriple().isPPC64() && - Target.hasFeature("paired-vector-memops")) { - if (Target.hasFeature("mma")) { + if (Target.getTriple().isPPC64()) { #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ InitBuiltinType(Id##Ty, BuiltinType::Id); #include "clang/Basic/PPCTypes.def" - } #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ InitBuiltinType(Id##Ty, BuiltinType::Id); #include "clang/Basic/PPCTypes.def" @@ -1453,6 +1406,19 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target, #include "clang/Basic/RISCVVTypes.def" } + if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) { +#define WASM_TYPE(Name, Id, SingletonId) \ + InitBuiltinType(SingletonId, BuiltinType::Id); +#include "clang/Basic/WebAssemblyReferenceTypes.def" + } + + if (Target.getTriple().isAMDGPU() || + (AuxTarget && AuxTarget->getTriple().isAMDGPU())) { +#define AMDGPU_TYPE(Name, Id, SingletonId) \ + InitBuiltinType(SingletonId, BuiltinType::Id); +#include "clang/Basic/AMDGPUTypes.def" + } + // Builtin type for __objc_yes and __objc_no ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? SignedCharTy : BoolTy); @@ -1550,11 +1516,7 @@ ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, NamedDecl * ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { - auto Pos = InstantiatedFromUsingDecl.find(UUD); - if (Pos == InstantiatedFromUsingDecl.end()) - return nullptr; - - return Pos->second; + return InstantiatedFromUsingDecl.lookup(UUD); } void @@ -1573,11 +1535,7 @@ ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { UsingEnumDecl * ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) { - auto Pos = InstantiatedFromUsingEnumDecl.find(UUD); - if (Pos == InstantiatedFromUsingEnumDecl.end()) - return nullptr; - - return Pos->second; + return InstantiatedFromUsingEnumDecl.lookup(UUD); } void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, @@ -1588,12 +1546,7 @@ void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, UsingShadowDecl * ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { - llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos - = InstantiatedFromUsingShadowDecl.find(Inst); - if (Pos == InstantiatedFromUsingShadowDecl.end()) - return nullptr; - - return Pos->second; + return InstantiatedFromUsingShadowDecl.lookup(Inst); } void @@ -1604,12 +1557,7 @@ ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, } FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { - llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos - = InstantiatedFromUnnamedFieldDecl.find(Field); - if (Pos == InstantiatedFromUnnamedFieldDecl.end()) - return nullptr; - - return Pos->second; + return InstantiatedFromUnnamedFieldDecl.lookup(Field); } void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, @@ -1700,16 +1648,19 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { case BuiltinType::BFloat16: return 
Target->getBFloat16Format(); case BuiltinType::Float16: + return Target->getHalfFormat(); case BuiltinType::Half: return Target->getHalfFormat(); case BuiltinType::Float: return Target->getFloatFormat(); case BuiltinType::Double: return Target->getDoubleFormat(); + case BuiltinType::Ibm128: + return Target->getIbm128Format(); case BuiltinType::LongDouble: - if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) + if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice) return AuxTarget->getLongDoubleFormat(); return Target->getLongDoubleFormat(); case BuiltinType::Float128: - if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) + if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice) return AuxTarget->getFloat128Format(); return Target->getFloat128Format(); } @@ -1718,28 +1669,22 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { unsigned Align = Target->getCharWidth(); - bool UseAlignAttrOnly = false; - if (unsigned AlignFromAttr = D->getMaxAlignment()) { + const unsigned AlignFromAttr = D->getMaxAlignment(); + if (AlignFromAttr) Align = AlignFromAttr; - // __attribute__((aligned)) can increase or decrease alignment - // *except* on a struct or struct member, where it only increases - // alignment unless 'packed' is also specified. - // - // It is an error for alignas to decrease alignment, so we can - // ignore that possibility; Sema should diagnose it. - if (isa<FieldDecl>(D)) { - UseAlignAttrOnly = D->hasAttr<PackedAttr>() || - cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); - } else { - UseAlignAttrOnly = true; - } - } - else if (isa<FieldDecl>(D)) - UseAlignAttrOnly = - D->hasAttr<PackedAttr>() || - cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); - + // __attribute__((aligned)) can increase or decrease alignment + // *except* on a struct or struct member, where it only increases + // alignment unless 'packed' is also specified. + // + // It is an error for alignas to decrease alignment, so we can + // ignore that possibility; Sema should diagnose it. + bool UseAlignAttrOnly; + if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) + UseAlignAttrOnly = + FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>(); + else + UseAlignAttrOnly = AlignFromAttr != 0; // If we're using the align attribute only, just ignore everything // else about the declaration and its type. if (UseAlignAttrOnly) { @@ -1771,14 +1716,16 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); if (BaseT.getQualifiers().hasUnaligned()) Align = Target->getCharWidth(); - if (const auto *VD = dyn_cast<VarDecl>(D)) { - if (VD->hasGlobalStorage() && !ForAlignof) { - uint64_t TypeSize = getTypeSize(T.getTypePtr()); - Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); - } - } } + // Ensure miminum alignment for global variables. + if (const auto *VD = dyn_cast<VarDecl>(D)) + if (VD->hasGlobalStorage() && !ForAlignof) { + uint64_t TypeSize = + !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0; + Align = std::max(Align, getMinGlobalAlignOfVar(TypeSize, VD)); + } + // Fields can be subject to extra alignment constraints, like if // the field is packed, the struct is packed, or the struct has a // a max-field-alignment constraint (#pragma pack). 
So calculate @@ -1833,7 +1780,8 @@ TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { // of a base-class subobject. We decide whether that's possible // during class layout, so here we can just trust the layout results. if (getLangOpts().CPlusPlus) { - if (const auto *RT = T->getAs<RecordType>()) { + if (const auto *RT = T->getAs<RecordType>(); + RT && !RT->getDecl()->isInvalidDecl()) { const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); Info.Width = layout.getDataSize(); } @@ -1848,18 +1796,18 @@ TypeInfoChars static getConstantArrayInfoInChars(const ASTContext &Context, const ConstantArrayType *CAT) { TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); - uint64_t Size = CAT->getSize().getZExtValue(); + uint64_t Size = CAT->getZExtSize(); assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= (uint64_t)(-1)/Size) && "Overflow in array type char size evaluation"); uint64_t Width = EltInfo.Width.getQuantity() * Size; unsigned Align = EltInfo.Align.getQuantity(); if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || - Context.getTargetInfo().getPointerWidth(0) == 64) + Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) Width = llvm::alignTo(Width, Align); return TypeInfoChars(CharUnits::fromQuantity(Width), CharUnits::fromQuantity(Align), - EltInfo.AlignIsRequired); + EltInfo.AlignRequirement); } TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { @@ -1867,16 +1815,53 @@ TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { return getConstantArrayInfoInChars(*this, CAT); TypeInfo Info = getTypeInfo(T); return TypeInfoChars(toCharUnitsFromBits(Info.Width), - toCharUnitsFromBits(Info.Align), - Info.AlignIsRequired); + toCharUnitsFromBits(Info.Align), Info.AlignRequirement); } TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { return getTypeInfoInChars(T.getTypePtr()); } +bool ASTContext::isPromotableIntegerType(QualType T) const { + // HLSL doesn't promote all small integer types to int, it + // just uses the rank-based promotion rules for all types. + if (getLangOpts().HLSL) + return false; + + if (const auto *BT = T->getAs<BuiltinType>()) + switch (BT->getKind()) { + case BuiltinType::Bool: + case BuiltinType::Char_S: + case BuiltinType::Char_U: + case BuiltinType::SChar: + case BuiltinType::UChar: + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + case BuiltinType::Char8: + case BuiltinType::Char16: + case BuiltinType::Char32: + return true; + default: + return false; + } + + // Enumerated types are promotable to their compatible integer types + // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 
+ if (const auto *ET = T->getAs<EnumType>()) { + if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || + ET->getDecl()->isScoped()) + return false; + + return true; + } + + return false; +} + bool ASTContext::isAlignmentRequired(const Type *T) const { - return getTypeInfo(T).AlignIsRequired; + return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; } bool ASTContext::isAlignmentRequired(QualType T) const { @@ -1928,8 +1913,8 @@ TypeInfo ASTContext::getTypeInfo(const Type *T) const { TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { uint64_t Width = 0; unsigned Align = 8; - bool AlignIsRequired = false; - unsigned AS = 0; + AlignRequirementKind AlignRequirement = AlignRequirementKind::None; + LangAS AS = LangAS::Default; switch (T->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) @@ -1951,20 +1936,21 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { case Type::IncompleteArray: case Type::VariableArray: - case Type::ConstantArray: { + case Type::ConstantArray: + case Type::ArrayParameter: { // Model non-constant sized arrays as size zero, but track the alignment. uint64_t Size = 0; if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) - Size = CAT->getSize().getZExtValue(); + Size = CAT->getZExtSize(); TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && "Overflow in array type bit size evaluation"); Width = EltInfo.Width * Size; Align = EltInfo.Align; - AlignIsRequired = EltInfo.AlignIsRequired; + AlignRequirement = EltInfo.AlignRequirement; if (!getTargetInfo().getCXXABI().isMicrosoft() || - getTargetInfo().getPointerWidth(0) == 64) + getTargetInfo().getPointerWidth(LangAS::Default) == 64) Width = llvm::alignTo(Width, Align); break; } @@ -1973,25 +1959,33 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { case Type::Vector: { const auto *VT = cast<VectorType>(T); TypeInfo EltInfo = getTypeInfo(VT->getElementType()); - Width = EltInfo.Width * VT->getNumElements(); - Align = Width; + Width = VT->isExtVectorBoolType() ? VT->getNumElements() + : EltInfo.Width * VT->getNumElements(); + // Enforce at least byte size and alignment. + Width = std::max<unsigned>(8, Width); + Align = std::max<unsigned>(8, Width); + // If the alignment is not a power of 2, round up to the next power of 2. // This happens for non-power-of-2 length vectors. if (Align & (Align-1)) { - Align = llvm::NextPowerOf2(Align); + Align = llvm::bit_ceil(Align); Width = llvm::alignTo(Width, Align); } // Adjust the alignment based on the target max. uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); if (TargetVectorAlign && TargetVectorAlign < Align) Align = TargetVectorAlign; - if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) + if (VT->getVectorKind() == VectorKind::SveFixedLengthData) // Adjust the alignment for fixed-length SVE vectors. This is important // for non-power-of-2 vector lengths. Align = 128; - else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) + else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) // Adjust the alignment for fixed-length SVE predicates. Align = 16; + else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || + VT->getVectorKind() == VectorKind::RVVFixedLengthMask) + // Adjust the alignment for fixed-length RVV vectors. 
+ Align = std::min<unsigned>(64, Width); break; } @@ -2062,7 +2056,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { case BuiltinType::Int128: case BuiltinType::UInt128: Width = 128; - Align = 128; // int128_t is 128-bit aligned on all targets. + Align = Target->getInt128Align(); break; case BuiltinType::ShortAccum: case BuiltinType::UShortAccum: @@ -2107,17 +2101,25 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { Align = Target->getLongFractAlign(); break; case BuiltinType::BFloat16: - Width = Target->getBFloat16Width(); - Align = Target->getBFloat16Align(); + if (Target->hasBFloat16Type()) { + Width = Target->getBFloat16Width(); + Align = Target->getBFloat16Align(); + } else if ((getLangOpts().SYCLIsDevice || + (getLangOpts().OpenMP && + getLangOpts().OpenMPIsTargetDevice)) && + AuxTarget->hasBFloat16Type()) { + Width = AuxTarget->getBFloat16Width(); + Align = AuxTarget->getBFloat16Align(); + } break; case BuiltinType::Float16: case BuiltinType::Half: if (Target->hasFloat16Type() || !getLangOpts().OpenMP || - !getLangOpts().OpenMPIsDevice) { + !getLangOpts().OpenMPIsTargetDevice) { Width = Target->getHalfWidth(); Align = Target->getHalfAlign(); } else { - assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && + assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && "Expected OpenMP device compilation."); Width = AuxTarget->getHalfWidth(); Align = AuxTarget->getHalfAlign(); @@ -2131,8 +2133,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { Width = Target->getDoubleWidth(); Align = Target->getDoubleAlign(); break; + case BuiltinType::Ibm128: + Width = Target->getIbm128Width(); + Align = Target->getIbm128Align(); + break; case BuiltinType::LongDouble: - if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && + if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { Width = AuxTarget->getLongDoubleWidth(); @@ -2144,25 +2150,26 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { break; case BuiltinType::Float128: if (Target->hasFloat128Type() || !getLangOpts().OpenMP || - !getLangOpts().OpenMPIsDevice) { + !getLangOpts().OpenMPIsTargetDevice) { Width = Target->getFloat128Width(); Align = Target->getFloat128Align(); } else { - assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && + assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && "Expected OpenMP device compilation."); Width = AuxTarget->getFloat128Width(); Align = AuxTarget->getFloat128Align(); } break; case BuiltinType::NullPtr: - Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) - Align = Target->getPointerAlign(0); // == sizeof(void*) + // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) + Width = Target->getPointerWidth(LangAS::Default); + Align = Target->getPointerAlign(LangAS::Default); break; case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: - Width = Target->getPointerWidth(0); - Align = Target->getPointerAlign(0); + Width = Target->getPointerWidth(LangAS::Default); + Align = Target->getPointerAlign(LangAS::Default); break; case BuiltinType::OCLSampler: case BuiltinType::OCLEvent: @@ -2175,8 +2182,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ case BuiltinType::Id: #include "clang/Basic/OpenCLExtensionTypes.def" - AS = getTargetAddressSpace( - 
Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); + AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); Width = Target->getPointerWidth(AS); Align = Target->getPointerAlign(AS); break; @@ -2199,6 +2205,11 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { Width = 0; \ Align = 16; \ break; +#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \ + case BuiltinType::Id: \ + Width = 0; \ + Align = 16; \ + break; #include "clang/Basic/AArch64SVEACLETypes.def" #define PPC_VECTOR_TYPE(Name, Id, Size) \ case BuiltinType::Id: \ @@ -2207,7 +2218,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { break; #include "clang/Basic/PPCTypes.def" #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ - IsFP) \ + IsFP, IsBF) \ case BuiltinType::Id: \ Width = 0; \ Align = ElBits; \ @@ -2218,14 +2229,27 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { Align = 8; \ break; #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) \ + case BuiltinType::Id: \ + Width = 0; \ + Align = 8; \ + break; +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_OPAQUE_PTR_TYPE(NAME, MANGLEDNAME, AS, WIDTH, ALIGN, ID, \ + SINGLETONID) \ + case BuiltinType::ID: \ + Width = WIDTH; \ + Align = ALIGN; \ + break; +#include "clang/Basic/AMDGPUTypes.def" } break; case Type::ObjCObjectPointer: - Width = Target->getPointerWidth(0); - Align = Target->getPointerAlign(0); + Width = Target->getPointerWidth(LangAS::Default); + Align = Target->getPointerAlign(LangAS::Default); break; case Type::BlockPointer: - AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); + AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); Width = Target->getPointerWidth(AS); Align = Target->getPointerAlign(AS); break; @@ -2233,12 +2257,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { case Type::RValueReference: // alignof and sizeof should never enter this code path here, so we go // the pointer route. 
- AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); + AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); Width = Target->getPointerWidth(AS); Align = Target->getPointerAlign(AS); break; case Type::Pointer: - AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); + AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); Width = Target->getPointerWidth(AS); Align = Target->getPointerAlign(AS); break; @@ -2274,13 +2298,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { Align = toBits(Layout.getAlignment()); break; } - case Type::ExtInt: { - const auto *EIT = cast<ExtIntType>(T); - Align = - std::min(static_cast<unsigned>(std::max( - getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), - Target->getLongLongAlign()); - Width = llvm::alignTo(EIT->getNumBits(), Align); + case Type::BitInt: { + const auto *EIT = cast<BitIntType>(T); + Align = Target->getBitIntAlign(EIT->getNumBits()); + Width = Target->getBitIntWidth(EIT->getNumBits()); break; } case Type::Record: @@ -2299,7 +2320,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); if (unsigned AttrAlign = ED->getMaxAlignment()) { Info.Align = AttrAlign; - Info.AlignIsRequired = true; + Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; } return Info; } @@ -2309,7 +2330,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { const ASTRecordLayout &Layout = getASTRecordLayout(RD); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); - AlignIsRequired = RD->hasAttr<AlignedAttr>(); + AlignRequirement = RD->hasAttr<AlignedAttr>() + ? AlignRequirementKind::RequiredByRecord + : AlignRequirementKind::None; break; } @@ -2335,18 +2358,21 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { case Type::ObjCTypeParam: return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); + case Type::Using: + return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); + case Type::Typedef: { - const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); - TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); + const auto *TT = cast<TypedefType>(T); + TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); // If the typedef has an aligned attribute on it, it overrides any computed // alignment we have. This violates the GCC documentation (which says that // attribute(aligned) can only round up) but matches its implementation. - if (unsigned AttrAlign = Typedef->getMaxAlignment()) { + if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) { Align = AttrAlign; - AlignIsRequired = true; + AlignRequirement = AlignRequirementKind::RequiredByTypedef; } else { Align = Info.Align; - AlignIsRequired = Info.AlignIsRequired; + AlignRequirement = Info.AlignRequirement; } Width = Info.Width; break; @@ -2359,6 +2385,13 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { return getTypeInfo( cast<AttributedType>(T)->getEquivalentType().getTypePtr()); + case Type::CountAttributed: + return getTypeInfo(cast<CountAttributedType>(T)->desugar().getTypePtr()); + + case Type::BTFTagAttributed: + return getTypeInfo( + cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr()); + case Type::Atomic: { // Start with the base type information. 
TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType()); @@ -2376,8 +2409,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { // favorable to atomic operations: // Round the size up to a power of 2. - if (!llvm::isPowerOf2_64(Width)) - Width = llvm::NextPowerOf2(Width); + Width = llvm::bit_ceil(Width); // Set the alignment equal to the size. Align = static_cast<unsigned>(Width); @@ -2386,13 +2418,13 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { break; case Type::Pipe: - Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global)); - Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global)); + Width = Target->getPointerWidth(LangAS::opencl_global); + Align = Target->getPointerAlign(LangAS::opencl_global); break; } assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); - return TypeInfo(Width, Align, AlignIsRequired); + return TypeInfo(Width, Align, AlignRequirement); } unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const { @@ -2417,7 +2449,8 @@ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const { } unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const { - unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign(); + unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign( + getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap); return SimdAlign; } @@ -2450,7 +2483,7 @@ CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { } /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a -/// type, in characters, before alignment adustments. This method does +/// type, in characters, before alignment adjustments. This method does /// not work on incomplete types. CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); @@ -2478,11 +2511,18 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { return ABIAlign; if (const auto *RT = T->getAs<RecordType>()) { - if (TI.AlignIsRequired || RT->getDecl()->isInvalidDecl()) + const RecordDecl *RD = RT->getDecl(); + + // When used as part of a typedef, or together with a 'packed' attribute, + // the 'aligned' attribute can be used to decrease alignment. Note that the + // 'packed' case is already taken into consideration when computing the + // alignment, we only need to handle the typedef case here. + if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || + RD->isInvalidDecl()) return ABIAlign; unsigned PreferredAlign = static_cast<unsigned>( - toBits(getASTRecordLayout(RT->getDecl()).PreferredAlignment)); + toBits(getASTRecordLayout(RD).PreferredAlignment)); assert(PreferredAlign >= ABIAlign && "PreferredAlign should be at least as large as ABIAlign."); return PreferredAlign; @@ -2502,7 +2542,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { Target->defaultsToAIXPowerAlignment())) // Don't increase the alignment if an alignment attribute was specified on a // typedef declaration. - if (!TI.AlignIsRequired) + if (!TI.isAlignRequired()) return std::max(ABIAlign, (unsigned)getTypeSize(T)); return ABIAlign; @@ -2517,16 +2557,25 @@ unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { /// getAlignOfGlobalVar - Return the alignment in bits that should be given /// to a global variable of the specified type. 
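The RequiredByTypedef kind introduced above is what lets an 'aligned' attribute that arrives through a typedef take precedence even when it lowers the alignment, and getPreferredTypeAlign now declines to bump such types back up. A small sketch of the user-visible effect, assuming a typical 64-bit ABI where long long is 8-byte aligned (the names Wide/Wide2 are made up for the example):

#include <cstdio>

struct Wide { long long x; };                             // alignof(Wide) is usually 8
typedef struct Wide Wide2 __attribute__((aligned(2)));    // typedef carries aligned(2)

int main() {
  // Clang (matching GCC's implementation rather than its documentation, as the
  // comment in this hunk notes) lets the typedef's alignment round *down*.
  std::printf("%zu %zu\n", alignof(Wide), alignof(Wide2)); // typically prints "8 2"
  return 0;
}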
-unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { +unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const { uint64_t TypeSize = getTypeSize(T.getTypePtr()); return std::max(getPreferredTypeAlign(T), - getTargetInfo().getMinGlobalAlign(TypeSize)); + getMinGlobalAlignOfVar(TypeSize, VD)); } /// getAlignOfGlobalVarInChars - Return the alignment in characters that /// should be given to a global variable of the specified type. -CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { - return toCharUnitsFromBits(getAlignOfGlobalVar(T)); +CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T, + const VarDecl *VD) const { + return toCharUnitsFromBits(getAlignOfGlobalVar(T, VD)); +} + +unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size, + const VarDecl *VD) const { + // Make the default handling as that of a non-weak definition in the + // current translation unit. + bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak()); + return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef); } CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { @@ -2569,8 +2618,7 @@ void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) DeepCollectObjCIvars(SuperClass, false, Ivars); if (!leafClass) { - for (const auto *I : OI->ivars()) - Ivars.push_back(I); + llvm::append_range(Ivars, OI->ivars()); } else { auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; @@ -2615,12 +2663,14 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl, } static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, - const RecordDecl *RD) { + const RecordDecl *RD, + bool CheckIfTriviallyCopyable) { assert(RD->isUnion() && "Must be union type"); CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); for (const auto *Field : RD->fields()) { - if (!Context.hasUniqueObjectRepresentations(Field->getType())) + if (!Context.hasUniqueObjectRepresentations(Field->getType(), + CheckIfTriviallyCopyable)) return false; CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); if (FieldSize != UnionSize) @@ -2629,124 +2679,182 @@ static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, return !RD->field_empty(); } -static bool isStructEmpty(QualType Ty) { - const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl(); +static int64_t getSubobjectOffset(const FieldDecl *Field, + const ASTContext &Context, + const clang::ASTRecordLayout & /*Layout*/) { + return Context.getFieldOffset(Field); +} - if (!RD->field_empty()) - return false; +static int64_t getSubobjectOffset(const CXXRecordDecl *RD, + const ASTContext &Context, + const clang::ASTRecordLayout &Layout) { + return Context.toBits(Layout.getBaseClassOffset(RD)); +} - if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) - return ClassDecl->isEmpty(); +static std::optional<int64_t> +structHasUniqueObjectRepresentations(const ASTContext &Context, + const RecordDecl *RD, + bool CheckIfTriviallyCopyable); + +static std::optional<int64_t> +getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, + bool CheckIfTriviallyCopyable) { + if (Field->getType()->isRecordType()) { + const RecordDecl *RD = Field->getType()->getAsRecordDecl(); + if (!RD->isUnion()) + return structHasUniqueObjectRepresentations(Context, RD, + CheckIfTriviallyCopyable); + } + + // A _BitInt type may not be 
unique if it has padding bits + // but if it is a bitfield the padding bits are not used. + bool IsBitIntType = Field->getType()->isBitIntType(); + if (!Field->getType()->isReferenceType() && !IsBitIntType && + !Context.hasUniqueObjectRepresentations(Field->getType(), + CheckIfTriviallyCopyable)) + return std::nullopt; + + int64_t FieldSizeInBits = + Context.toBits(Context.getTypeSizeInChars(Field->getType())); + if (Field->isBitField()) { + // If we have explicit padding bits, they don't contribute bits + // to the actual object representation, so return 0. + if (Field->isUnnamedBitField()) + return 0; - return true; + int64_t BitfieldSize = Field->getBitWidthValue(Context); + if (IsBitIntType) { + if ((unsigned)BitfieldSize > + cast<BitIntType>(Field->getType())->getNumBits()) + return std::nullopt; + } else if (BitfieldSize > FieldSizeInBits) { + return std::nullopt; + } + FieldSizeInBits = BitfieldSize; + } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations( + Field->getType(), CheckIfTriviallyCopyable)) { + return std::nullopt; + } + return FieldSizeInBits; +} + +static std::optional<int64_t> +getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context, + bool CheckIfTriviallyCopyable) { + return structHasUniqueObjectRepresentations(Context, RD, + CheckIfTriviallyCopyable); +} + +template <typename RangeT> +static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( + const RangeT &Subobjects, int64_t CurOffsetInBits, + const ASTContext &Context, const clang::ASTRecordLayout &Layout, + bool CheckIfTriviallyCopyable) { + for (const auto *Subobject : Subobjects) { + std::optional<int64_t> SizeInBits = + getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable); + if (!SizeInBits) + return std::nullopt; + if (*SizeInBits != 0) { + int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); + if (Offset != CurOffsetInBits) + return std::nullopt; + CurOffsetInBits += *SizeInBits; + } + } + return CurOffsetInBits; } -static llvm::Optional<int64_t> +static std::optional<int64_t> structHasUniqueObjectRepresentations(const ASTContext &Context, - const RecordDecl *RD) { + const RecordDecl *RD, + bool CheckIfTriviallyCopyable) { assert(!RD->isUnion() && "Must be struct/class type"); const auto &Layout = Context.getASTRecordLayout(RD); int64_t CurOffsetInBits = 0; if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { if (ClassDecl->isDynamicClass()) - return llvm::None; + return std::nullopt; - SmallVector<std::pair<QualType, int64_t>, 4> Bases; + SmallVector<CXXRecordDecl *, 4> Bases; for (const auto &Base : ClassDecl->bases()) { // Empty types can be inherited from, and non-empty types can potentially // have tail padding, so just make sure there isn't an error. 
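getSubobjectSizeInBits and structHasUniqueObjectRepresentations above are the machinery behind std::has_unique_object_representations. A short illustration of what padding does to the answer; the layout assumed (4-byte int, 4-byte int alignment) is typical but target-dependent, and the struct names are only for the example:

#include <type_traits>

struct Dense  { int a; int b; };   // contiguous, no padding on common ABIs
struct Sparse { char c; int i; };  // usually 3 padding bytes after 'c'

// On an ordinary LP64 target:
//   std::has_unique_object_representations_v<Dense>  -> true
//   std::has_unique_object_representations_v<Sparse> -> false (padding bits are
//                                                       not part of the value)
// A Clang _BitInt(7) behaves like Sparse: it is stored in 8 bits, so the
// BitIntType check in this patch (type size == bit width) rejects it too.
constexpr bool dense_ok  = std::has_unique_object_representations_v<Dense>;
constexpr bool sparse_ok = std::has_unique_object_representations_v<Sparse>;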
- if (!isStructEmpty(Base.getType())) { - llvm::Optional<int64_t> Size = structHasUniqueObjectRepresentations( - Context, Base.getType()->castAs<RecordType>()->getDecl()); - if (!Size) - return llvm::None; - Bases.emplace_back(Base.getType(), Size.getValue()); - } + Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); } - llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L, - const std::pair<QualType, int64_t> &R) { - return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) < - Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl()); + llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { + return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); }); - for (const auto &Base : Bases) { - int64_t BaseOffset = Context.toBits( - Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl())); - int64_t BaseSize = Base.second; - if (BaseOffset != CurOffsetInBits) - return llvm::None; - CurOffsetInBits = BaseOffset + BaseSize; - } + std::optional<int64_t> OffsetAfterBases = + structSubobjectsHaveUniqueObjectRepresentations( + Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable); + if (!OffsetAfterBases) + return std::nullopt; + CurOffsetInBits = *OffsetAfterBases; } - for (const auto *Field : RD->fields()) { - if (!Field->getType()->isReferenceType() && - !Context.hasUniqueObjectRepresentations(Field->getType())) - return llvm::None; - - int64_t FieldSizeInBits = - Context.toBits(Context.getTypeSizeInChars(Field->getType())); - if (Field->isBitField()) { - int64_t BitfieldSize = Field->getBitWidthValue(Context); - - if (BitfieldSize > FieldSizeInBits) - return llvm::None; - FieldSizeInBits = BitfieldSize; - } - - int64_t FieldOffsetInBits = Context.getFieldOffset(Field); - - if (FieldOffsetInBits != CurOffsetInBits) - return llvm::None; - - CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits; - } + std::optional<int64_t> OffsetAfterFields = + structSubobjectsHaveUniqueObjectRepresentations( + RD->fields(), CurOffsetInBits, Context, Layout, + CheckIfTriviallyCopyable); + if (!OffsetAfterFields) + return std::nullopt; + CurOffsetInBits = *OffsetAfterFields; return CurOffsetInBits; } -bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { +bool ASTContext::hasUniqueObjectRepresentations( + QualType Ty, bool CheckIfTriviallyCopyable) const { // C++17 [meta.unary.prop]: // The predicate condition for a template specialization - // has_unique_object_representations<T> shall be - // satisfied if and only if: + // has_unique_object_representations<T> shall be satisfied if and only if: // (9.1) - T is trivially copyable, and // (9.2) - any two objects of type T with the same value have the same - // object representation, where two objects - // of array or non-union class type are considered to have the same value - // if their respective sequences of - // direct subobjects have the same values, and two objects of union type - // are considered to have the same - // value if they have the same active member and the corresponding members - // have the same value. + // object representation, where: + // - two objects of array or non-union class type are considered to have + // the same value if their respective sequences of direct subobjects + // have the same values, and + // - two objects of union type are considered to have the same value if + // they have the same active member and the corresponding members have + // the same value. // The set of scalar types for which this condition holds is - // implementation-defined. 
[ Note: If a type has padding - // bits, the condition does not hold; otherwise, the condition holds true - // for unsigned integral types. -- end note ] + // implementation-defined. [ Note: If a type has padding bits, the condition + // does not hold; otherwise, the condition holds true for unsigned integral + // types. -- end note ] assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); // Arrays are unique only if their element type is unique. if (Ty->isArrayType()) - return hasUniqueObjectRepresentations(getBaseElementType(Ty)); + return hasUniqueObjectRepresentations(getBaseElementType(Ty), + CheckIfTriviallyCopyable); + + assert((Ty->isVoidType() || !Ty->isIncompleteType()) && + "hasUniqueObjectRepresentations should not be called with an " + "incomplete type"); // (9.1) - T is trivially copyable... - if (!Ty.isTriviallyCopyableType(*this)) + if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) return false; // All integrals and enums are unique. - if (Ty->isIntegralOrEnumerationType()) + if (Ty->isIntegralOrEnumerationType()) { + // Except _BitInt types that have padding bits. + if (const auto *BIT = Ty->getAs<BitIntType>()) + return getTypeSize(BIT) == BIT->getNumBits(); + return true; + } // All other pointers are unique. if (Ty->isPointerType()) return true; - if (Ty->isMemberPointerType()) { - const auto *MPT = Ty->getAs<MemberPointerType>(); + if (const auto *MPT = Ty->getAs<MemberPointerType>()) return !ABI->getMemberPointerInfo(MPT).HasPadding; - } if (Ty->isRecordType()) { const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); @@ -2755,13 +2863,13 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { return false; if (Record->isUnion()) - return unionHasUniqueObjectRepresentations(*this, Record); + return unionHasUniqueObjectRepresentations(*this, Record, + CheckIfTriviallyCopyable); - Optional<int64_t> StructSize = - structHasUniqueObjectRepresentations(*this, Record); + std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations( + *this, Record, CheckIfTriviallyCopyable); - return StructSize && - StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty)); + return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); } // FIXME: More cases to handle here (list by rsmith): @@ -2898,7 +3006,7 @@ TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, auto *TInfo = (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); - new (TInfo) TypeSourceInfo(T); + new (TInfo) TypeSourceInfo(T, DataSize); return TInfo; } @@ -2920,6 +3028,18 @@ ASTContext::getASTObjCImplementationLayout( return getObjCLayout(D->getClassInterface(), D); } +static auto getCanonicalTemplateArguments(const ASTContext &C, + ArrayRef<TemplateArgument> Args, + bool &AnyNonCanonArgs) { + SmallVector<TemplateArgument, 16> CanonArgs(Args); + for (auto &Arg : CanonArgs) { + TemplateArgument OrigArg = Arg; + Arg = C.getCanonicalTemplateArgument(Arg); + AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); + } + return CanonArgs; +} + //===----------------------------------------------------------------------===// // Type creation/memoization methods //===----------------------------------------------------------------------===// @@ -2949,7 +3069,7 @@ ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); } - auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); + auto *eq = new (*this, alignof(ExtQuals)) 
ExtQuals(baseType, canon, quals); ExtQualNodes.InsertNode(eq, insertPos); return QualType(eq, fastQuals); } @@ -2980,21 +3100,27 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const { if (!T.hasAddressSpace()) return T; - // If we are composing extended qualifiers together, merge together - // into one ExtQuals node. QualifierCollector Quals; const Type *TypeNode; + // For arrays, strip the qualifier off the element type, then reconstruct the + // array type + if (T.getTypePtr()->isArrayType()) { + T = getUnqualifiedArrayType(T, Quals); + TypeNode = T.getTypePtr(); + } else { + // If we are composing extended qualifiers together, merge together + // into one ExtQuals node. + while (T.hasAddressSpace()) { + TypeNode = Quals.strip(T); + + // If the type no longer has an address space after stripping qualifiers, + // jump out. + if (!QualType(TypeNode, 0).hasAddressSpace()) + break; - while (T.hasAddressSpace()) { - TypeNode = Quals.strip(T); - - // If the type no longer has an address space after stripping qualifiers, - // jump out. - if (!QualType(TypeNode, 0).hasAddressSpace()) - break; - - // There might be sugar in the way. Strip it and try again. - T = T.getSingleStepDesugaredType(*this); + // There might be sugar in the way. Strip it and try again. + T = T.getSingleStepDesugaredType(*this); + } } Quals.removeAddressSpace(); @@ -3008,6 +3134,300 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const { return QualType(TypeNode, Quals.getFastQualifiers()); } +uint16_t +ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) { + assert(RD->isPolymorphic() && + "Attempted to get vtable pointer discriminator on a monomorphic type"); + std::unique_ptr<MangleContext> MC(createMangleContext()); + SmallString<256> Str; + llvm::raw_svector_ostream Out(Str); + MC->mangleCXXVTable(RD, Out); + return llvm::getPointerAuthStableSipHash(Str); +} + +/// Encode a function type for use in the discriminator of a function pointer +/// type. We can't use the itanium scheme for this since C has quite permissive +/// rules for type compatibility that we need to be compatible with. +/// +/// Formally, this function associates every function pointer type T with an +/// encoded string E(T). Let the equivalence relation T1 ~ T2 be defined as +/// E(T1) == E(T2). E(T) is part of the ABI of values of type T. C type +/// compatibility requires equivalent treatment under the ABI, so +/// CCompatible(T1, T2) must imply E(T1) == E(T2), that is, CCompatible must be +/// a subset of ~. Crucially, however, it must be a proper subset because +/// CCompatible is not an equivalence relation: for example, int[] is compatible +/// with both int[1] and int[2], but the latter are not compatible with each +/// other. Therefore this encoding function must be careful to only distinguish +/// types if there is no third type with which they are both required to be +/// compatible. +static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx, + raw_ostream &OS, QualType QT) { + // FIXME: Consider address space qualifiers. + const Type *T = QT.getCanonicalType().getTypePtr(); + + // FIXME: Consider using the C++ type mangling when we encounter a construct + // that is incompatible with C. 
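For getPointerAuthVTablePointerDiscriminator above, the string that gets hashed is simply the mangled vtable symbol of the class. Roughly, assuming the Itanium mangling that createMangleContext() produces on most targets:

struct S { virtual void f(); };   // any polymorphic class

// mangleCXXVTable(S) writes the vtable symbol "_ZTV1S" into the buffer, and the
// returned discriminator is llvm::getPointerAuthStableSipHash("_ZTV1S") -- a
// stable 16-bit hash, so the value depends only on the mangled name.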
+ + switch (T->getTypeClass()) { + case Type::Atomic: + return encodeTypeForFunctionPointerAuth( + Ctx, OS, cast<AtomicType>(T)->getValueType()); + + case Type::LValueReference: + OS << "R"; + encodeTypeForFunctionPointerAuth(Ctx, OS, + cast<ReferenceType>(T)->getPointeeType()); + return; + case Type::RValueReference: + OS << "O"; + encodeTypeForFunctionPointerAuth(Ctx, OS, + cast<ReferenceType>(T)->getPointeeType()); + return; + + case Type::Pointer: + // C11 6.7.6.1p2: + // For two pointer types to be compatible, both shall be identically + // qualified and both shall be pointers to compatible types. + // FIXME: we should also consider pointee types. + OS << "P"; + return; + + case Type::ObjCObjectPointer: + case Type::BlockPointer: + OS << "P"; + return; + + case Type::Complex: + OS << "C"; + return encodeTypeForFunctionPointerAuth( + Ctx, OS, cast<ComplexType>(T)->getElementType()); + + case Type::VariableArray: + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::ArrayParameter: + // C11 6.7.6.2p6: + // For two array types to be compatible, both shall have compatible + // element types, and if both size specifiers are present, and are integer + // constant expressions, then both size specifiers shall have the same + // constant value [...] + // + // So since ElemType[N] has to be compatible ElemType[], we can't encode the + // width of the array. + OS << "A"; + return encodeTypeForFunctionPointerAuth( + Ctx, OS, cast<ArrayType>(T)->getElementType()); + + case Type::ObjCInterface: + case Type::ObjCObject: + OS << "<objc_object>"; + return; + + case Type::Enum: { + // C11 6.7.2.2p4: + // Each enumerated type shall be compatible with char, a signed integer + // type, or an unsigned integer type. + // + // So we have to treat enum types as integers. + QualType UnderlyingType = cast<EnumType>(T)->getDecl()->getIntegerType(); + return encodeTypeForFunctionPointerAuth( + Ctx, OS, UnderlyingType.isNull() ? Ctx.IntTy : UnderlyingType); + } + + case Type::FunctionNoProto: + case Type::FunctionProto: { + // C11 6.7.6.3p15: + // For two function types to be compatible, both shall specify compatible + // return types. Moreover, the parameter type lists, if both are present, + // shall agree in the number of parameters and in the use of the ellipsis + // terminator; corresponding parameters shall have compatible types. + // + // That paragraph goes on to describe how unprototyped functions are to be + // handled, which we ignore here. Unprototyped function pointers are hashed + // as though they were prototyped nullary functions since thats probably + // what the user meant. This behavior is non-conforming. + // FIXME: If we add a "custom discriminator" function type attribute we + // should encode functions as their discriminators. 
+ OS << "F"; + const auto *FuncType = cast<FunctionType>(T); + encodeTypeForFunctionPointerAuth(Ctx, OS, FuncType->getReturnType()); + if (const auto *FPT = dyn_cast<FunctionProtoType>(FuncType)) { + for (QualType Param : FPT->param_types()) { + Param = Ctx.getSignatureParameterType(Param); + encodeTypeForFunctionPointerAuth(Ctx, OS, Param); + } + if (FPT->isVariadic()) + OS << "z"; + } + OS << "E"; + return; + } + + case Type::MemberPointer: { + OS << "M"; + const auto *MPT = T->getAs<MemberPointerType>(); + encodeTypeForFunctionPointerAuth(Ctx, OS, QualType(MPT->getClass(), 0)); + encodeTypeForFunctionPointerAuth(Ctx, OS, MPT->getPointeeType()); + return; + } + case Type::ExtVector: + case Type::Vector: + OS << "Dv" << Ctx.getTypeSizeInChars(T).getQuantity(); + break; + + // Don't bother discriminating based on these types. + case Type::Pipe: + case Type::BitInt: + case Type::ConstantMatrix: + OS << "?"; + return; + + case Type::Builtin: { + const auto *BTy = T->getAs<BuiltinType>(); + switch (BTy->getKind()) { +#define SIGNED_TYPE(Id, SingletonId) \ + case BuiltinType::Id: \ + OS << "i"; \ + return; +#define UNSIGNED_TYPE(Id, SingletonId) \ + case BuiltinType::Id: \ + OS << "i"; \ + return; +#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: +#define BUILTIN_TYPE(Id, SingletonId) +#include "clang/AST/BuiltinTypes.def" + llvm_unreachable("placeholder types should not appear here."); + + case BuiltinType::Half: + OS << "Dh"; + return; + case BuiltinType::Float: + OS << "f"; + return; + case BuiltinType::Double: + OS << "d"; + return; + case BuiltinType::LongDouble: + OS << "e"; + return; + case BuiltinType::Float16: + OS << "DF16_"; + return; + case BuiltinType::Float128: + OS << "g"; + return; + + case BuiltinType::Void: + OS << "v"; + return; + + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + case BuiltinType::NullPtr: + OS << "P"; + return; + + // Don't bother discriminating based on OpenCL types. + case BuiltinType::OCLSampler: + case BuiltinType::OCLEvent: + case BuiltinType::OCLClkEvent: + case BuiltinType::OCLQueue: + case BuiltinType::OCLReserveID: + case BuiltinType::BFloat16: + case BuiltinType::VectorQuad: + case BuiltinType::VectorPair: + OS << "?"; + return; + + // Don't bother discriminating based on these seldom-used types. + case BuiltinType::Ibm128: + return; +#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ + case BuiltinType::Id: \ + return; +#include "clang/Basic/OpenCLImageTypes.def" +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ + case BuiltinType::Id: \ + return; +#include "clang/Basic/OpenCLExtensionTypes.def" +#define SVE_TYPE(Name, Id, SingletonId) \ + case BuiltinType::Id: \ + return; +#include "clang/Basic/AArch64SVEACLETypes.def" + case BuiltinType::Dependent: + llvm_unreachable("should never get here"); + case BuiltinType::AMDGPUBufferRsrc: + case BuiltinType::WasmExternRef: +#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/RISCVVTypes.def" + llvm_unreachable("not yet implemented"); + } + } + case Type::Record: { + const RecordDecl *RD = T->getAs<RecordType>()->getDecl(); + const IdentifierInfo *II = RD->getIdentifier(); + + // In C++, an immediate typedef of an anonymous struct or union + // is considered to name it for ODR purposes, but C's specification + // of type compatibility does not have a similar rule. 
Using the typedef + // name in function type discriminators anyway, as we do here, + // therefore technically violates the C standard: two function pointer + // types defined in terms of two typedef'd anonymous structs with + // different names are formally still compatible, but we are assigning + // them different discriminators and therefore incompatible ABIs. + // + // This is a relatively minor violation that significantly improves + // discrimination in some cases and has not caused problems in + // practice. Regardless, it is now part of the ABI in places where + // function type discrimination is used, and it can no longer be + // changed except on new platforms. + + if (!II) + if (const TypedefNameDecl *Typedef = RD->getTypedefNameForAnonDecl()) + II = Typedef->getDeclName().getAsIdentifierInfo(); + + if (!II) { + OS << "<anonymous_record>"; + return; + } + OS << II->getLength() << II->getName(); + return; + } + case Type::DeducedTemplateSpecialization: + case Type::Auto: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define ABSTRACT_TYPE(Class, Base) +#define TYPE(Class, Base) +#include "clang/AST/TypeNodes.inc" + llvm_unreachable("unexpected non-canonical or dependent type!"); + return; + } +} + +uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) { + assert(!T->isDependentType() && + "cannot compute type discriminator of a dependent type"); + + SmallString<256> Str; + llvm::raw_svector_ostream Out(Str); + + if (T->isFunctionPointerType() || T->isFunctionReferenceType()) + T = T->getPointeeType(); + + if (T->isFunctionType()) { + encodeTypeForFunctionPointerAuth(*this, Out, T); + } else { + T = T.getUnqualifiedType(); + std::unique_ptr<MangleContext> MC(createMangleContext()); + MC->mangleCanonicalTypeName(T, Out); + } + + return llvm::getPointerAuthStableSipHash(Str); +} + QualType ASTContext::getObjCGCQualType(QualType T, Qualifiers::GC GCAttr) const { QualType CanT = getCanonicalType(T); @@ -3046,6 +3466,32 @@ QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { return T; } +QualType ASTContext::getCountAttributedType( + QualType WrappedTy, Expr *CountExpr, bool CountInBytes, bool OrNull, + ArrayRef<TypeCoupledDeclRefInfo> DependentDecls) const { + assert(WrappedTy->isPointerType() || WrappedTy->isArrayType()); + + llvm::FoldingSetNodeID ID; + CountAttributedType::Profile(ID, WrappedTy, CountExpr, CountInBytes, OrNull); + + void *InsertPos = nullptr; + CountAttributedType *CATy = + CountAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); + if (CATy) + return QualType(CATy, 0); + + QualType CanonTy = getCanonicalType(WrappedTy); + size_t Size = CountAttributedType::totalSizeToAlloc<TypeCoupledDeclRefInfo>( + DependentDecls.size()); + CATy = (CountAttributedType *)Allocate(Size, TypeAlignment); + new (CATy) CountAttributedType(WrappedTy, CanonTy, CountExpr, CountInBytes, + OrNull, DependentDecls); + Types.push_back(CATy); + CountAttributedTypes.InsertNode(CATy, InsertPos); + + return QualType(CATy, 0); +} + const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, FunctionType::ExtInfo Info) { if (T->getExtInfo() == Info) @@ -3085,7 +3531,7 @@ void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, /// declaration of a function with an exception specification is permitted /// and preserved. Other type sugar (for instance, typedefs) is not. 
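Following the encoding switch above end to end gives a feel for the scheme; the resulting strings are what getPointerAuthTypeDiscriminator feeds to llvm::getPointerAuthStableSipHash, so two signatures collide exactly when their encodings match:

// Every signed or unsigned integer type encodes as "i", every data pointer as "P",
// and a prototype is wrapped in "F"..."E" with a trailing "z" for an ellipsis:
//
//   void f(int, char *);     ->  "F" "v" "i" "P" "E"   ==  "FviPE"
//   long g(unsigned, ...);   ->  "F" "i" "i" "z" "E"   ==  "FiizE"
//   double h(void);          ->  "F" "d" "E"           ==  "FdE"
//   int k();  // unprototyped in C: hashed as the nullary "FiE", which, as the
//             // comment above says, is deliberately non-conforming.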
QualType ASTContext::getFunctionTypeWithExceptionSpec( - QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { + QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { // Might have some parens. if (const auto *PT = dyn_cast<ParenType>(Orig)) return getParenType( @@ -3113,7 +3559,7 @@ QualType ASTContext::getFunctionTypeWithExceptionSpec( } bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, - QualType U) { + QualType U) const { return hasSameType(T, U) || (getLangOpts().CPlusPlus17 && hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), @@ -3123,9 +3569,9 @@ bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { if (const auto *Proto = T->getAs<FunctionProtoType>()) { QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); - SmallVector<QualType, 16> Args(Proto->param_types()); + SmallVector<QualType, 16> Args(Proto->param_types().size()); for (unsigned i = 0, n = Args.size(); i != n; ++i) - Args[i] = removePtrSizeAddrSpace(Args[i]); + Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); } @@ -3193,7 +3639,7 @@ QualType ASTContext::getComplexType(QualType T) const { ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); + auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical); Types.push_back(New); ComplexTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -3221,7 +3667,7 @@ QualType ASTContext::getPointerType(QualType T) const { PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); + auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical); Types.push_back(New); PointerTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -3241,13 +3687,33 @@ QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!AT && "Shouldn't be in the map!"); - AT = new (*this, TypeAlignment) + AT = new (*this, alignof(AdjustedType)) AdjustedType(Type::Adjusted, Orig, New, Canonical); Types.push_back(AT); AdjustedTypes.InsertNode(AT, InsertPos); return QualType(AT, 0); } +QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { + llvm::FoldingSetNodeID ID; + AdjustedType::Profile(ID, Orig, Decayed); + void *InsertPos = nullptr; + AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); + if (AT) + return QualType(AT, 0); + + QualType Canonical = getCanonicalType(Decayed); + + // Get the new insert position for the node we care about. 
+ AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!AT && "Shouldn't be in the map!"); + + AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical); + Types.push_back(AT); + AdjustedTypes.InsertNode(AT, InsertPos); + return QualType(AT, 0); +} + QualType ASTContext::getDecayedType(QualType T) const { assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); @@ -3268,22 +3734,37 @@ QualType ASTContext::getDecayedType(QualType T) const { if (T->isFunctionType()) Decayed = getPointerType(T); + return getDecayedType(T, Decayed); +} + +QualType ASTContext::getArrayParameterType(QualType Ty) const { + if (Ty->isArrayParameterType()) + return Ty; + assert(Ty->isConstantArrayType() && "Ty must be an array type."); + const auto *ATy = cast<ConstantArrayType>(Ty); llvm::FoldingSetNodeID ID; - AdjustedType::Profile(ID, T, Decayed); + ATy->Profile(ID, *this, ATy->getElementType(), ATy->getZExtSize(), + ATy->getSizeExpr(), ATy->getSizeModifier(), + ATy->getIndexTypeQualifiers().getAsOpaqueValue()); void *InsertPos = nullptr; - AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); + ArrayParameterType *AT = + ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos); if (AT) return QualType(AT, 0); - QualType Canonical = getCanonicalType(Decayed); + QualType Canonical; + if (!Ty.isCanonical()) { + Canonical = getArrayParameterType(getCanonicalType(Ty)); - // Get the new insert position for the node we care about. - AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); - assert(!AT && "Shouldn't be in the map!"); + // Get the new insert position for the node we care about. + AT = ArrayParameterTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!AT && "Shouldn't be in the map!"); + } - AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); + AT = new (*this, alignof(ArrayParameterType)) + ArrayParameterType(ATy, Canonical); Types.push_back(AT); - AdjustedTypes.InsertNode(AT, InsertPos); + ArrayParameterTypes.InsertNode(AT, InsertPos); return QualType(AT, 0); } @@ -3312,7 +3793,8 @@ QualType ASTContext::getBlockPointerType(QualType T) const { BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); + auto *New = + new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical); Types.push_back(New); BlockPointerTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -3322,8 +3804,9 @@ QualType ASTContext::getBlockPointerType(QualType T) const { /// lvalue reference to the specified type. QualType ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { - assert(getCanonicalType(T) != OverloadTy && - "Unresolved overloaded function type"); + assert((!T->isPlaceholderType() || + T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && + "Unresolved placeholder type"); // Unique pointers, to guarantee there is only one pointer of a particular // structure. 
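getDecayedType above records the array-to-pointer and function-to-pointer adjustment as sugar over the adjusted type. The language-level equivalence it models is easy to check with the standard traits:

#include <type_traits>

// What DecayedType models: the written type on the left, the adjusted type on
// the right.
static_assert(std::is_same_v<std::decay_t<int[8]>, int *>);
static_assert(std::is_same_v<std::decay_t<void(int)>, void (*)(int)>);
static_assert(std::is_same_v<std::decay_t<const char[6]>, const char *>);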
@@ -3350,8 +3833,8 @@ ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, - SpelledAsLValue); + auto *New = new (*this, alignof(LValueReferenceType)) + LValueReferenceType(T, Canonical, SpelledAsLValue); Types.push_back(New); LValueReferenceTypes.InsertNode(New, InsertPos); @@ -3361,6 +3844,10 @@ ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { /// getRValueReferenceType - Return the uniqued reference to the type for an /// rvalue reference to the specified type. QualType ASTContext::getRValueReferenceType(QualType T) const { + assert((!T->isPlaceholderType() || + T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && + "Unresolved placeholder type"); + // Unique pointers, to guarantee there is only one pointer of a particular // structure. llvm::FoldingSetNodeID ID; @@ -3386,7 +3873,8 @@ QualType ASTContext::getRValueReferenceType(QualType T) const { assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); + auto *New = new (*this, alignof(RValueReferenceType)) + RValueReferenceType(T, Canonical); Types.push_back(New); RValueReferenceTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -3416,7 +3904,8 @@ QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); + auto *New = new (*this, alignof(MemberPointerType)) + MemberPointerType(T, Cls, Canonical); Types.push_back(New); MemberPointerTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -3427,7 +3916,7 @@ QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { QualType ASTContext::getConstantArrayType(QualType EltTy, const llvm::APInt &ArySizeIn, const Expr *SizeExpr, - ArrayType::ArraySizeModifier ASM, + ArraySizeModifier ASM, unsigned IndexTypeQuals) const { assert((EltTy->isDependentType() || EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && @@ -3443,8 +3932,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy, ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); llvm::FoldingSetNodeID ID; - ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, - IndexTypeQuals); + ConstantArrayType::Profile(ID, *this, EltTy, ArySize.getZExtValue(), SizeExpr, + ASM, IndexTypeQuals); void *InsertPos = nullptr; if (ConstantArrayType *ATP = @@ -3455,6 +3944,7 @@ QualType ASTContext::getConstantArrayType(QualType EltTy, // is instantiation-dependent, this won't be a canonical type either, so fill // in the canonical type field. QualType Canon; + // FIXME: Check below should look for qualifiers behind sugar. if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { SplitQualType canonSplit = getCanonicalType(EltTy).split(); Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, @@ -3467,11 +3957,8 @@ QualType ASTContext::getConstantArrayType(QualType EltTy, assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - void *Mem = Allocate( - ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 
1 : 0), - TypeAlignment); - auto *New = new (Mem) - ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); + auto *New = ConstantArrayType::Create(*this, EltTy, Canon, ArySize, SizeExpr, + ASM, IndexTypeQuals); ConstantArrayTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); @@ -3524,8 +4011,10 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const { case Type::Auto: case Type::DeducedTemplateSpecialization: case Type::PackExpansion: - case Type::ExtInt: - case Type::DependentExtInt: + case Type::PackIndexing: + case Type::BitInt: + case Type::DependentBitInt: + case Type::ArrayParameter: llvm_unreachable("type should never be variably-modified"); // These types can be variably-modified but should never need to @@ -3592,12 +4081,10 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const { // Turn incomplete types into [*] types. case Type::IncompleteArray: { const auto *iat = cast<IncompleteArrayType>(ty); - result = getVariableArrayType( - getVariableArrayDecayedType(iat->getElementType()), - /*size*/ nullptr, - ArrayType::Normal, - iat->getIndexTypeCVRQualifiers(), - SourceRange()); + result = + getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()), + /*size*/ nullptr, ArraySizeModifier::Normal, + iat->getIndexTypeCVRQualifiers(), SourceRange()); break; } @@ -3605,11 +4092,9 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const { case Type::VariableArray: { const auto *vat = cast<VariableArrayType>(ty); result = getVariableArrayType( - getVariableArrayDecayedType(vat->getElementType()), - /*size*/ nullptr, - ArrayType::Star, - vat->getIndexTypeCVRQualifiers(), - vat->getBracketsRange()); + getVariableArrayDecayedType(vat->getElementType()), + /*size*/ nullptr, ArraySizeModifier::Star, + vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange()); break; } } @@ -3620,9 +4105,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const { /// getVariableArrayType - Returns a non-unique reference to the type for a /// variable array of the specified element type. -QualType ASTContext::getVariableArrayType(QualType EltTy, - Expr *NumElts, - ArrayType::ArraySizeModifier ASM, +QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts, + ArraySizeModifier ASM, unsigned IndexTypeQuals, SourceRange Brackets) const { // Since we don't unique expressions, it isn't possible to unique VLA's @@ -3630,6 +4114,7 @@ QualType ASTContext::getVariableArrayType(QualType EltTy, QualType Canon; // Be sure to pull qualifiers off the element type. + // FIXME: Check below should look for qualifiers behind sugar. if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { SplitQualType canonSplit = getCanonicalType(EltTy).split(); Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, @@ -3637,8 +4122,8 @@ QualType ASTContext::getVariableArrayType(QualType EltTy, Canon = getQualifiedType(Canon, canonSplit.Quals); } - auto *New = new (*this, TypeAlignment) - VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); + auto *New = new (*this, alignof(VariableArrayType)) + VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); VariableArrayTypes.push_back(New); Types.push_back(New); @@ -3650,48 +4135,45 @@ QualType ASTContext::getVariableArrayType(QualType EltTy, /// type. 
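The "pull qualifiers off the element type" canonicalization in getConstantArrayType and getVariableArrayType above (and the matching FIXME about sugar) reflects the language rule that cv-qualifiers on an array really live on its elements. A quick check of that equivalence:

#include <type_traits>

// 'const' applied to an array type qualifies the elements, so these are the same
// type -- which is why the canonical array is built from the unqualified element
// type and the qualifiers are re-applied on top.
static_assert(std::is_same_v<std::add_const_t<int[3]>, const int[3]>);
static_assert(std::is_same_v<std::remove_const_t<const int[3]>, int[3]>);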
QualType ASTContext::getDependentSizedArrayType(QualType elementType, Expr *numElements, - ArrayType::ArraySizeModifier ASM, + ArraySizeModifier ASM, unsigned elementTypeQuals, SourceRange brackets) const { assert((!numElements || numElements->isTypeDependent() || numElements->isValueDependent()) && "Size must be type- or value-dependent!"); - // Dependently-sized array types that do not have a specified number - // of elements will have their sizes deduced from a dependent - // initializer. We do no canonicalization here at all, which is okay - // because they can't be used in most locations. - if (!numElements) { - auto *newType - = new (*this, TypeAlignment) - DependentSizedArrayType(*this, elementType, QualType(), - numElements, ASM, elementTypeQuals, - brackets); - Types.push_back(newType); - return QualType(newType, 0); - } - - // Otherwise, we actually build a new type every time, but we - // also build a canonical type. - SplitQualType canonElementType = getCanonicalType(elementType).split(); void *insertPos = nullptr; llvm::FoldingSetNodeID ID; - DependentSizedArrayType::Profile(ID, *this, - QualType(canonElementType.Ty, 0), - ASM, elementTypeQuals, numElements); + DependentSizedArrayType::Profile( + ID, *this, numElements ? QualType(canonElementType.Ty, 0) : elementType, + ASM, elementTypeQuals, numElements); // Look for an existing type with these properties. DependentSizedArrayType *canonTy = DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); + // Dependently-sized array types that do not have a specified number + // of elements will have their sizes deduced from a dependent + // initializer. + if (!numElements) { + if (canonTy) + return QualType(canonTy, 0); + + auto *newType = new (*this, alignof(DependentSizedArrayType)) + DependentSizedArrayType(elementType, QualType(), numElements, ASM, + elementTypeQuals, brackets); + DependentSizedArrayTypes.InsertNode(newType, insertPos); + Types.push_back(newType); + return QualType(newType, 0); + } + // If we don't have one, build one. if (!canonTy) { - canonTy = new (*this, TypeAlignment) - DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), - QualType(), numElements, ASM, elementTypeQuals, - brackets); + canonTy = new (*this, alignof(DependentSizedArrayType)) + DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(), + numElements, ASM, elementTypeQuals, brackets); DependentSizedArrayTypes.InsertNode(canonTy, insertPos); Types.push_back(canonTy); } @@ -3708,16 +4190,15 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType, // Otherwise, we need to build a type which follows the spelling // of the element type. - auto *sugaredType - = new (*this, TypeAlignment) - DependentSizedArrayType(*this, elementType, canon, numElements, - ASM, elementTypeQuals, brackets); + auto *sugaredType = new (*this, alignof(DependentSizedArrayType)) + DependentSizedArrayType(elementType, canon, numElements, ASM, + elementTypeQuals, brackets); Types.push_back(sugaredType); return QualType(sugaredType, 0); } QualType ASTContext::getIncompleteArrayType(QualType elementType, - ArrayType::ArraySizeModifier ASM, + ArraySizeModifier ASM, unsigned elementTypeQuals) const { llvm::FoldingSetNodeID ID; IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); @@ -3732,6 +4213,7 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType, // qualifiers off the element type. QualType canon; + // FIXME: Check below should look for qualifiers behind sugar. 
if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { SplitQualType canonSplit = getCanonicalType(elementType).split(); canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), @@ -3744,8 +4226,8 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType, assert(!existing && "Shouldn't be in the map!"); (void) existing; } - auto *newType = new (*this, TypeAlignment) - IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); + auto *newType = new (*this, alignof(IncompleteArrayType)) + IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); IncompleteArrayTypes.InsertNode(newType, insertPos); Types.push_back(newType); @@ -3830,6 +4312,10 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { return SVE_INT_ELTTY(64, 2, false, 4); case BuiltinType::SveBool: return SVE_ELTTY(BoolTy, 16, 1); + case BuiltinType::SveBoolx2: + return SVE_ELTTY(BoolTy, 16, 2); + case BuiltinType::SveBoolx4: + return SVE_ELTTY(BoolTy, 16, 4); case BuiltinType::SveFloat16: return SVE_ELTTY(HalfTy, 8, 1); case BuiltinType::SveFloat16x2: @@ -3871,6 +4357,9 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { case BuiltinType::Id: \ return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ llvm::ElementCount::getScalable(NumEls), NF}; +#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ + case BuiltinType::Id: \ + return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF}; #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ case BuiltinType::Id: \ return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; @@ -3878,11 +4367,24 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { } } +/// getExternrefType - Return a WebAssembly externref type, which represents an +/// opaque reference to a host value. +QualType ASTContext::getWebAssemblyExternrefType() const { + if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) { +#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ + if (BuiltinType::Id == BuiltinType::WasmExternRef) \ + return SingletonId; +#include "clang/Basic/WebAssemblyReferenceTypes.def" + } + llvm_unreachable( + "shouldn't try to generate type externref outside WebAssembly target"); +} + /// getScalableVectorType - Return the unique reference to a scalable vector /// type of the specified element type and size. VectorType must be a built-in /// type. 
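getBuiltinVectorTypeInfo above ties each ACLE sizeless builtin to an element type, a scalable element count, and a tuple factor (NF). Roughly, for an AArch64 SVE target, reading the cases visible in this hunk (the ACLE spellings in the comments are the usual ones and assume the 128-bit SVE granule):

// BuiltinType::SveBool    -> { BoolTy, vscale x 16, NF = 1 }   // svbool_t
// BuiltinType::SveBoolx2  -> { BoolTy, vscale x 16, NF = 2 }   // svboolx2_t (new here)
// BuiltinType::SveFloat16 -> { HalfTy, vscale x  8, NF = 1 }   // svfloat16_t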
-QualType ASTContext::getScalableVectorType(QualType EltTy, - unsigned NumElts) const { +QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, + unsigned NumFields) const { if (Target->hasAArch64SVETypes()) { uint64_t EltTySize = getTypeSize(EltTy); #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ @@ -3900,20 +4402,24 @@ QualType ASTContext::getScalableVectorType(QualType EltTy, #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ if (EltTy->isBooleanType() && NumElts == NumEls) \ return SingletonId; +#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId) #include "clang/Basic/AArch64SVEACLETypes.def" } else if (Target->hasRISCVVTypes()) { uint64_t EltTySize = getTypeSize(EltTy); #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ - IsFP) \ - if (!EltTy->isBooleanType() && \ - ((EltTy->hasIntegerRepresentation() && \ - EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ - (EltTy->hasFloatingRepresentation() && IsFP)) && \ - EltTySize == ElBits && NumElts == NumEls) \ - return SingletonId; + IsFP, IsBF) \ + if (!EltTy->isBooleanType() && \ + ((EltTy->hasIntegerRepresentation() && \ + EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ + (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ + IsFP && !IsBF) || \ + (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ + IsBF && !IsFP)) && \ + EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ + return SingletonId; #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ - if (EltTy->isBooleanType() && NumElts == NumEls) \ - return SingletonId; + if (EltTy->isBooleanType() && NumElts == NumEls) \ + return SingletonId; #include "clang/Basic/RISCVVTypes.def" } return QualType(); @@ -3922,8 +4428,12 @@ QualType ASTContext::getScalableVectorType(QualType EltTy, /// getVectorType - Return the unique reference to a vector type of /// the specified element type and size. VectorType must be a built-in type. QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, - VectorType::VectorKind VecKind) const { - assert(vecType->isBuiltinType()); + VectorKind VecKind) const { + assert(vecType->isBuiltinType() || + (vecType->isBitIntType() && + // Only support _BitInt elements with byte-sized power of 2 NumBits. + llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && + vecType->castAs<BitIntType>()->getNumBits() >= 8)); // Check if we've already instantiated a vector of this type. 
llvm::FoldingSetNodeID ID; @@ -3943,17 +4453,16 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) - VectorType(vecType, NumElts, Canonical, VecKind); + auto *New = new (*this, alignof(VectorType)) + VectorType(vecType, NumElts, Canonical, VecKind); VectorTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); } -QualType -ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, - SourceLocation AttrLoc, - VectorType::VectorKind VecKind) const { +QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, + SourceLocation AttrLoc, + VectorKind VecKind) const { llvm::FoldingSetNodeID ID; DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, VecKind); @@ -3963,13 +4472,13 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, DependentVectorType *New; if (Canon) { - New = new (*this, TypeAlignment) DependentVectorType( - *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); + New = new (*this, alignof(DependentVectorType)) DependentVectorType( + VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); } else { QualType CanonVecTy = getCanonicalType(VecType); if (CanonVecTy == VecType) { - New = new (*this, TypeAlignment) DependentVectorType( - *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); + New = new (*this, alignof(DependentVectorType)) + DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind); DependentVectorType *CanonCheck = DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); @@ -3980,8 +4489,8 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, } else { QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, SourceLocation(), VecKind); - New = new (*this, TypeAlignment) DependentVectorType( - *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); + New = new (*this, alignof(DependentVectorType)) + DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind); } } @@ -3991,14 +4500,18 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, /// getExtVectorType - Return the unique reference to an extended vector type of /// the specified element type and size. VectorType must be a built-in type. -QualType -ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { - assert(vecType->isBuiltinType() || vecType->isDependentType()); +QualType ASTContext::getExtVectorType(QualType vecType, + unsigned NumElts) const { + assert(vecType->isBuiltinType() || vecType->isDependentType() || + (vecType->isBitIntType() && + // Only support _BitInt elements with byte-sized power of 2 NumBits. + llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && + vecType->castAs<BitIntType>()->getNumBits() >= 8)); // Check if we've already instantiated a vector of this type. 
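The relaxed asserts in getVectorType and getExtVectorType above admit _BitInt element types, but only byte-sized powers of two. In user code this corresponds to something like the following (Clang extensions, so a sketch rather than portable C++):

typedef _BitInt(32) bi32;
typedef bi32 v4bi32 __attribute__((vector_size(16)));   // OK: 4 x _BitInt(32)

// typedef _BitInt(24) bi24;
// typedef bi24 bad __attribute__((vector_size(12)));   // rejected: 24 is not a
//                                                      // power of two
// _BitInt(4) elements are rejected as well: fewer than 8 bits.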
llvm::FoldingSetNodeID ID; VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, - VectorType::GenericVector); + VectorKind::Generic); void *InsertPos = nullptr; if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(VTP, 0); @@ -4013,8 +4526,8 @@ ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) - ExtVectorType(vecType, NumElts, Canonical); + auto *New = new (*this, alignof(ExtVectorType)) + ExtVectorType(vecType, NumElts, Canonical); VectorTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); @@ -4035,15 +4548,14 @@ ASTContext::getDependentSizedExtVectorType(QualType vecType, if (Canon) { // We already have a canonical version of this array type; use it as // the canonical type for a newly-built type. - New = new (*this, TypeAlignment) - DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), - SizeExpr, AttrLoc); + New = new (*this, alignof(DependentSizedExtVectorType)) + DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr, + AttrLoc); } else { QualType CanonVecTy = getCanonicalType(vecType); if (CanonVecTy == vecType) { - New = new (*this, TypeAlignment) - DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, - AttrLoc); + New = new (*this, alignof(DependentSizedExtVectorType)) + DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc); DependentSizedExtVectorType *CanonCheck = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); @@ -4053,8 +4565,8 @@ ASTContext::getDependentSizedExtVectorType(QualType vecType, } else { QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, SourceLocation()); - New = new (*this, TypeAlignment) DependentSizedExtVectorType( - *this, vecType, CanonExtTy, SizeExpr, AttrLoc); + New = new (*this, alignof(DependentSizedExtVectorType)) + DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc); } } @@ -4087,7 +4599,7 @@ QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, (void)NewIP; } - auto *New = new (*this, TypeAlignment) + auto *New = new (*this, alignof(ConstantMatrixType)) ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); MatrixTypes.InsertNode(New, InsertPos); Types.push_back(New); @@ -4108,8 +4620,9 @@ QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); if (!Canon) { - Canon = new (*this, TypeAlignment) DependentSizedMatrixType( - *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc); + Canon = new (*this, alignof(DependentSizedMatrixType)) + DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr, + ColumnExpr, AttrLoc); #ifndef NDEBUG DependentSizedMatrixType *CanonCheck = DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); @@ -4127,8 +4640,8 @@ QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, return QualType(Canon, 0); // Use Canon as the canonical type for newly-built type. 
- DependentSizedMatrixType *New = new (*this, TypeAlignment) - DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, + DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType)) + DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr, ColumnExpr, AttrLoc); Types.push_back(New); return QualType(New, 0); @@ -4150,9 +4663,9 @@ QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); if (!canonTy) { - canonTy = new (*this, TypeAlignment) - DependentAddressSpaceType(*this, canonPointeeType, - QualType(), AddrSpaceExpr, AttrLoc); + canonTy = new (*this, alignof(DependentAddressSpaceType)) + DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr, + AttrLoc); DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); Types.push_back(canonTy); } @@ -4161,10 +4674,9 @@ QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, canonTy->getAddrSpaceExpr() == AddrSpaceExpr) return QualType(canonTy, 0); - auto *sugaredType - = new (*this, TypeAlignment) - DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), - AddrSpaceExpr, AttrLoc); + auto *sugaredType = new (*this, alignof(DependentAddressSpaceType)) + DependentAddressSpaceType(PointeeType, QualType(canonTy, 0), + AddrSpaceExpr, AttrLoc); Types.push_back(sugaredType); return QualType(sugaredType, 0); } @@ -4180,6 +4692,13 @@ static bool isCanonicalResultType(QualType T) { QualType ASTContext::getFunctionNoProtoType(QualType ResultTy, const FunctionType::ExtInfo &Info) const { + // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter + // functionality creates a function without a prototype regardless of + // language mode (so it makes them even in C++). Once the rewriter has been + // fixed, this assertion can be enabled again. + //assert(!LangOpts.requiresStrictPrototypes() && + // "strict prototypes are disabled"); + // Unique functions, to guarantee there is only one function of a particular // structure. llvm::FoldingSetNodeID ID; @@ -4201,8 +4720,8 @@ ASTContext::getFunctionNoProtoType(QualType ResultTy, assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) - FunctionNoProtoType(ResultTy, Canonical, Info); + auto *New = new (*this, alignof(FunctionNoProtoType)) + FunctionNoProtoType(ResultTy, Canonical, Info); Types.push_back(New); FunctionNoProtoTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -4327,7 +4846,7 @@ QualType ASTContext::getFunctionTypeInternal( case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: // We don't know yet. It shouldn't matter what we pick here; no-one // should ever look at this. 
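getDependentAddressSpaceType above models a pointee whose address space is still a dependent expression. A hedged sketch of the kind of template that produces such a type, using Clang's address_space extension (the function name is illustrative):

    template <int AS>
    void store(int __attribute__((address_space(AS))) *ptr, int v) {
      // Until 'AS' is known, the parameter's type is represented with a
      // DependentAddressSpaceType; instantiation resolves it to a concrete
      // address-space-qualified pointee.
      *ptr = v;
    }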
- LLVM_FALLTHROUGH; + [[fallthrough]]; case EST_None: case EST_MSAny: case EST_NoexceptFalse: CanonicalEPI.ExceptionSpec.Type = EST_None; break; @@ -4381,20 +4900,24 @@ QualType ASTContext::getFunctionTypeInternal( EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); size_t Size = FunctionProtoType::totalSizeToAlloc< QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, - FunctionType::ExceptionType, Expr *, FunctionDecl *, - FunctionProtoType::ExtParameterInfo, Qualifiers>( - NumArgs, EPI.Variadic, - FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type), - ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, + FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType, + Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers, + FunctionEffect, EffectConditionExpr>( + NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), + EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType, + ESH.NumExprPtr, ESH.NumFunctionDeclPtr, EPI.ExtParameterInfos ? NumArgs : 0, - EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); + EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0, EPI.FunctionEffects.size(), + EPI.FunctionEffects.conditions().size()); - auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); + auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType)); FunctionProtoType::ExtProtoInfo newEPI = EPI; new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); Types.push_back(FTP); if (!Unique) FunctionProtoTypes.InsertNode(FTP, InsertPos); + if (!EPI.FunctionEffects.empty()) + AnyFunctionEffects = true; return QualType(FTP, 0); } @@ -4417,7 +4940,7 @@ QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); + auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly); Types.push_back(New); PipeTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -4437,34 +4960,34 @@ QualType ASTContext::getWritePipeType(QualType T) const { return getPipeType(T, false); } -QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const { +QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { llvm::FoldingSetNodeID ID; - ExtIntType::Profile(ID, IsUnsigned, NumBits); + BitIntType::Profile(ID, IsUnsigned, NumBits); void *InsertPos = nullptr; - if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos)) + if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(EIT, 0); - auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits); - ExtIntTypes.InsertNode(New, InsertPos); + auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits); + BitIntTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); } -QualType ASTContext::getDependentExtIntType(bool IsUnsigned, +QualType ASTContext::getDependentBitIntType(bool IsUnsigned, Expr *NumBitsExpr) const { assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); llvm::FoldingSetNodeID ID; - DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); + DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); void *InsertPos = nullptr; - if (DependentExtIntType *Existing = - DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos)) + if (DependentBitIntType *Existing = + DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) return 
QualType(Existing, 0); - auto *New = new (*this, TypeAlignment) - DependentExtIntType(*this, IsUnsigned, NumBitsExpr); - DependentExtIntTypes.InsertNode(New, InsertPos); + auto *New = new (*this, alignof(DependentBitIntType)) + DependentBitIntType(IsUnsigned, NumBitsExpr); + DependentBitIntTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); @@ -4495,8 +5018,8 @@ QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, Decl->TypeForDecl = PrevDecl->TypeForDecl; assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); } else { - Type *newType = - new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); + Type *newType = new (*this, alignof(InjectedClassNameType)) + InjectedClassNameType(Decl, TST); Decl->TypeForDecl = newType; Types.push_back(newType); } @@ -4523,9 +5046,7 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { assert(Enum->isFirstDecl() && "enum has previous declaration"); return getEnumType(Enum); } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { - Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using); - Decl->TypeForDecl = newType; - Types.push_back(newType); + return getUnresolvedUsingType(Using); } else llvm_unreachable("TypeDecl without a type?"); @@ -4536,16 +5057,63 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { /// specified typedef name decl. QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, QualType Underlying) const { - if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); + if (!Decl->TypeForDecl) { + if (Underlying.isNull()) + Underlying = Decl->getUnderlyingType(); + auto *NewType = new (*this, alignof(TypedefType)) TypedefType( + Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); + Decl->TypeForDecl = NewType; + Types.push_back(NewType); + return QualType(NewType, 0); + } + if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) + return QualType(Decl->TypeForDecl, 0); + assert(hasSameType(Decl->getUnderlyingType(), Underlying)); - if (Underlying.isNull()) - Underlying = Decl->getUnderlyingType(); - QualType Canonical = getCanonicalType(Underlying); - auto *newType = new (*this, TypeAlignment) - TypedefType(Type::Typedef, Decl, Underlying, Canonical); - Decl->TypeForDecl = newType; - Types.push_back(newType); - return QualType(newType, 0); + llvm::FoldingSetNodeID ID; + TypedefType::Profile(ID, Decl, Underlying); + + void *InsertPos = nullptr; + if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { + assert(!T->typeMatchesDecl() && + "non-divergent case should be handled with TypeDecl"); + return QualType(T, 0); + } + + void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true), + alignof(TypedefType)); + auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, + getCanonicalType(Underlying)); + TypedefTypes.InsertNode(NewType, InsertPos); + Types.push_back(NewType); + return QualType(NewType, 0); +} + +QualType ASTContext::getUsingType(const UsingShadowDecl *Found, + QualType Underlying) const { + llvm::FoldingSetNodeID ID; + UsingType::Profile(ID, Found, Underlying); + + void *InsertPos = nullptr; + if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) + return QualType(T, 0); + + const Type *TypeForDecl = + cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); + + assert(!Underlying.hasLocalQualifiers()); + QualType Canon = Underlying->getCanonicalTypeInternal(); + assert(TypeForDecl->getCanonicalTypeInternal() == Canon); + + 
if (Underlying.getTypePtr() == TypeForDecl) + Underlying = QualType(); + void *Mem = + Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), + alignof(UsingType)); + UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); + Types.push_back(NewType); + UsingTypes.InsertNode(NewType, InsertPos); + return QualType(NewType, 0); } QualType ASTContext::getRecordType(const RecordDecl *Decl) const { @@ -4555,7 +5123,7 @@ QualType ASTContext::getRecordType(const RecordDecl *Decl) const { if (PrevDecl->TypeForDecl) return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); - auto *newType = new (*this, TypeAlignment) RecordType(Decl); + auto *newType = new (*this, alignof(RecordType)) RecordType(Decl); Decl->TypeForDecl = newType; Types.push_back(newType); return QualType(newType, 0); @@ -4568,7 +5136,24 @@ QualType ASTContext::getEnumType(const EnumDecl *Decl) const { if (PrevDecl->TypeForDecl) return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); - auto *newType = new (*this, TypeAlignment) EnumType(Decl); + auto *newType = new (*this, alignof(EnumType)) EnumType(Decl); + Decl->TypeForDecl = newType; + Types.push_back(newType); + return QualType(newType, 0); +} + +QualType ASTContext::getUnresolvedUsingType( + const UnresolvedUsingTypenameDecl *Decl) const { + if (Decl->TypeForDecl) + return QualType(Decl->TypeForDecl, 0); + + if (const UnresolvedUsingTypenameDecl *CanonicalDecl = + Decl->getCanonicalDecl()) + if (CanonicalDecl->TypeForDecl) + return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); + + Type *newType = + new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl); Decl->TypeForDecl = newType; Types.push_back(newType); return QualType(newType, 0); @@ -4576,7 +5161,7 @@ QualType ASTContext::getEnumType(const EnumDecl *Decl) const { QualType ASTContext::getAttributedType(attr::Kind attrKind, QualType modifiedType, - QualType equivalentType) { + QualType equivalentType) const { llvm::FoldingSetNodeID id; AttributedType::Profile(id, attrKind, modifiedType, equivalentType); @@ -4585,7 +5170,7 @@ QualType ASTContext::getAttributedType(attr::Kind attrKind, if (type) return QualType(type, 0); QualType canon = getCanonicalType(equivalentType); - type = new (*this, TypeAlignment) + type = new (*this, alignof(AttributedType)) AttributedType(canon, attrKind, modifiedType, equivalentType); Types.push_back(type); @@ -4594,22 +5179,44 @@ QualType ASTContext::getAttributedType(attr::Kind attrKind, return QualType(type, 0); } -/// Retrieve a substitution-result type. -QualType -ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm, - QualType Replacement) const { - assert(Replacement.isCanonical() - && "replacement types must always be canonical"); +QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, + QualType Wrapped) { + llvm::FoldingSetNodeID ID; + BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); + + void *InsertPos = nullptr; + BTFTagAttributedType *Ty = + BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); + if (Ty) + return QualType(Ty, 0); + + QualType Canon = getCanonicalType(Wrapped); + Ty = new (*this, alignof(BTFTagAttributedType)) + BTFTagAttributedType(Canon, Wrapped, BTFAttr); + + Types.push_back(Ty); + BTFTagAttributedTypes.InsertNode(Ty, InsertPos); + + return QualType(Ty, 0); +} +/// Retrieve a substitution-result type. 
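The new getUsingType above introduces sugar for types named through a using-declaration; a small illustration of the source pattern it models (names are illustrative):

    namespace lib { struct widget {}; }

    using lib::widget;   // UsingShadowDecl that makes 'widget' visible here

    // Naming the type through the using-declaration can now be recorded as a
    // UsingType wrapping the underlying lib::widget, instead of losing how
    // the type was written.
    widget w;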
+QualType ASTContext::getSubstTemplateTypeParmType( + QualType Replacement, Decl *AssociatedDecl, unsigned Index, + std::optional<unsigned> PackIndex) const { llvm::FoldingSetNodeID ID; - SubstTemplateTypeParmType::Profile(ID, Parm, Replacement); + SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, + PackIndex); void *InsertPos = nullptr; - SubstTemplateTypeParmType *SubstParm - = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); + SubstTemplateTypeParmType *SubstParm = + SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); if (!SubstParm) { - SubstParm = new (*this, TypeAlignment) - SubstTemplateTypeParmType(Parm, Replacement); + void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( + !Replacement.isCanonical()), + alignof(SubstTemplateTypeParmType)); + SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, + Index, PackIndex); Types.push_back(SubstParm); SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); } @@ -4618,34 +5225,39 @@ ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm, } /// Retrieve a -QualType ASTContext::getSubstTemplateTypeParmPackType( - const TemplateTypeParmType *Parm, - const TemplateArgument &ArgPack) { +QualType +ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, + unsigned Index, bool Final, + const TemplateArgument &ArgPack) { #ifndef NDEBUG - for (const auto &P : ArgPack.pack_elements()) { - assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type"); - assert(P.getAsType().isCanonical() && "Pack contains non-canonical type"); - } + for (const auto &P : ArgPack.pack_elements()) + assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); #endif llvm::FoldingSetNodeID ID; - SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack); + SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, + ArgPack); void *InsertPos = nullptr; - if (SubstTemplateTypeParmPackType *SubstParm - = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) + if (SubstTemplateTypeParmPackType *SubstParm = + SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(SubstParm, 0); QualType Canon; - if (!Parm->isCanonicalUnqualified()) { - Canon = getCanonicalType(QualType(Parm, 0)); - Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon), - ArgPack); - SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); + { + TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); + if (!AssociatedDecl->isCanonicalDecl() || + !CanonArgPack.structurallyEquals(ArgPack)) { + Canon = getSubstTemplateTypeParmPackType( + AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); + [[maybe_unused]] const auto *Nothing = + SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!Nothing); + } } - auto *SubstParm - = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon, - ArgPack); + auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType)) + SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final, + ArgPack); Types.push_back(SubstParm); SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); return QualType(SubstParm, 0); @@ -4668,15 +5280,16 @@ QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index, if (TTPDecl) { QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack); - TypeParm = new (*this, TypeAlignment) 
TemplateTypeParmType(TTPDecl, Canon); + TypeParm = new (*this, alignof(TemplateTypeParmType)) + TemplateTypeParmType(TTPDecl, Canon); TemplateTypeParmType *TypeCheck = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!TypeCheck && "Template type parameter canonical type broken"); (void)TypeCheck; } else - TypeParm = new (*this, TypeAlignment) - TemplateTypeParmType(Depth, Index, ParameterPack); + TypeParm = new (*this, alignof(TemplateTypeParmType)) + TemplateTypeParmType(Depth, Index, ParameterPack); Types.push_back(TypeParm); TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos); @@ -4691,7 +5304,8 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, QualType Underlying) const { assert(!Name.getAsDependentTemplateName() && "No dependent template names here!"); - QualType TST = getTemplateSpecializationType(Name, Args, Underlying); + QualType TST = + getTemplateSpecializationType(Name, Args.arguments(), Underlying); TypeSourceInfo *DI = CreateTypeSourceInfo(TST); TemplateSpecializationTypeLoc TL = @@ -4707,14 +5321,14 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, QualType ASTContext::getTemplateSpecializationType(TemplateName Template, - const TemplateArgumentListInfo &Args, + ArrayRef<TemplateArgumentLoc> Args, QualType Underlying) const { assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); SmallVector<TemplateArgument, 4> ArgVec; ArgVec.reserve(Args.size()); - for (const TemplateArgumentLoc &Arg : Args.arguments()) + for (const TemplateArgumentLoc &Arg : Args) ArgVec.push_back(Arg.getArgument()); return getTemplateSpecializationType(Template, ArgVec, Underlying); @@ -4736,13 +5350,9 @@ ASTContext::getTemplateSpecializationType(TemplateName Template, QualType Underlying) const { assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); - // Look through qualified template names. - if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) - Template = TemplateName(QTN->getTemplateDecl()); - bool IsTypeAlias = - Template.getAsTemplateDecl() && - isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl()); + const auto *TD = Template.getAsTemplateDecl(); + bool IsTypeAlias = TD && TD->isTypeAlias(); QualType CanonType; if (!Underlying.isNull()) CanonType = getCanonicalType(Underlying); @@ -4759,9 +5369,9 @@ ASTContext::getTemplateSpecializationType(TemplateName Template, // try to unique it: these types typically have location information that // we don't unique and don't want to lose. void *Mem = Allocate(sizeof(TemplateSpecializationType) + - sizeof(TemplateArgument) * Args.size() + - (IsTypeAlias? sizeof(QualType) : 0), - TypeAlignment); + sizeof(TemplateArgument) * Args.size() + + (IsTypeAlias ? sizeof(QualType) : 0), + alignof(TemplateSpecializationType)); auto *Spec = new (Mem) TemplateSpecializationType(Template, Args, CanonType, IsTypeAlias ? Underlying : QualType()); @@ -4775,17 +5385,11 @@ QualType ASTContext::getCanonicalTemplateSpecializationType( assert(!Template.getAsDependentTemplateName() && "No dependent template names here!"); - // Look through qualified template names. - if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) - Template = TemplateName(QTN->getTemplateDecl()); - // Build the canonical template specialization type. 
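The IsTypeAlias handling above (now keyed off TemplateDecl::isTypeAlias) keeps the aliased type alongside a TemplateSpecializationType written through an alias template, roughly the situation below (alias name is illustrative):

    template <class T> using ptr_to = T *;

    // 'ptr_to<int>' is written as a template specialization, but the node
    // also records the aliased type 'int *' so the written sugar survives.
    ptr_to<int> p = nullptr;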
TemplateName CanonTemplate = getCanonicalTemplateName(Template); - SmallVector<TemplateArgument, 4> CanonArgs; - unsigned NumArgs = Args.size(); - CanonArgs.reserve(NumArgs); - for (const TemplateArgument &Arg : Args) - CanonArgs.push_back(getCanonicalTemplateArgument(Arg)); + bool AnyNonCanonArgs = false; + auto CanonArgs = + ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); // Determine whether this canonical template specialization type already // exists. @@ -4800,8 +5404,8 @@ QualType ASTContext::getCanonicalTemplateSpecializationType( if (!Spec) { // Allocate a new canonical template specialization type. void *Mem = Allocate((sizeof(TemplateSpecializationType) + - sizeof(TemplateArgument) * NumArgs), - TypeAlignment); + sizeof(TemplateArgument) * CanonArgs.size()), + alignof(TemplateSpecializationType)); Spec = new (Mem) TemplateSpecializationType(CanonTemplate, CanonArgs, QualType(), QualType()); @@ -4834,8 +5438,9 @@ QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, (void)CheckT; } - void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), - TypeAlignment); + void *Mem = + Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), + alignof(ElaboratedType)); T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); Types.push_back(T); @@ -4861,7 +5466,7 @@ ASTContext::getParenType(QualType InnerType) const { (void)CheckT; } - T = new (*this, TypeAlignment) ParenType(InnerType, Canon); + T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon); Types.push_back(T); ParenTypes.InsertNode(T, InsertPos); return QualType(T, 0); @@ -4874,7 +5479,7 @@ ASTContext::getMacroQualifiedType(QualType UnderlyingTy, if (!Canon.isCanonical()) Canon = getCanonicalType(UnderlyingTy); - auto *newType = new (*this, TypeAlignment) + auto *newType = new (*this, alignof(MacroQualifiedType)) MacroQualifiedType(UnderlyingTy, Canon, MacroII); Types.push_back(newType); return QualType(newType, 0); @@ -4899,18 +5504,16 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, if (T) return QualType(T, 0); - T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); + T = new (*this, alignof(DependentNameType)) + DependentNameType(Keyword, NNS, Name, Canon); Types.push_back(T); DependentNameTypes.InsertNode(T, InsertPos); return QualType(T, 0); } -QualType -ASTContext::getDependentTemplateSpecializationType( - ElaboratedTypeKeyword Keyword, - NestedNameSpecifier *NNS, - const IdentifierInfo *Name, - const TemplateArgumentListInfo &Args) const { +QualType ASTContext::getDependentTemplateSpecializationType( + ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, + const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { // TODO: avoid this copy SmallVector<TemplateArgument, 16> ArgCopy; for (unsigned I = 0, E = Args.size(); I != E; ++I) @@ -4940,16 +5543,12 @@ ASTContext::getDependentTemplateSpecializationType( NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); ElaboratedTypeKeyword CanonKeyword = Keyword; - if (Keyword == ETK_None) CanonKeyword = ETK_Typename; + if (Keyword == ElaboratedTypeKeyword::None) + CanonKeyword = ElaboratedTypeKeyword::Typename; bool AnyNonCanonArgs = false; - unsigned NumArgs = Args.size(); - SmallVector<TemplateArgument, 16> CanonArgs(NumArgs); - for (unsigned I = 0; I != NumArgs; ++I) { - CanonArgs[I] = getCanonicalTemplateArgument(Args[I]); - if (!CanonArgs[I].structurallyEquals(Args[I])) - AnyNonCanonArgs = true; - } + 
auto CanonArgs = + ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); QualType Canon; if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { @@ -4958,12 +5557,14 @@ ASTContext::getDependentTemplateSpecializationType( CanonArgs); // Find the insert position again. - DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); + [[maybe_unused]] auto *Nothing = + DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!Nothing && "canonical type broken"); } void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + - sizeof(TemplateArgument) * NumArgs), - TypeAlignment); + sizeof(TemplateArgument) * Args.size()), + alignof(DependentTemplateSpecializationType)); T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, Name, Args, Canon); Types.push_back(T); @@ -4976,7 +5577,7 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { QualType ArgType = getTypeDeclType(TTP); if (TTP->isParameterPack()) - ArgType = getPackExpansionType(ArgType, None); + ArgType = getPackExpansionType(ArgType, std::nullopt); Arg = TemplateArgument(ArgType); } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { @@ -4989,19 +5590,21 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { if (T->isRecordType()) T.addConst(); Expr *E = new (*this) DeclRefExpr( - *this, NTTP, /*enclosing*/ false, T, + *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); if (NTTP->isParameterPack()) - E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(), - None); + E = new (*this) + PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); Arg = TemplateArgument(E); } else { auto *TTP = cast<TemplateTemplateParmDecl>(Param); + TemplateName Name = getQualifiedTemplateName( + nullptr, /*TemplateKeyword=*/false, TemplateName(TTP)); if (TTP->isParameterPack()) - Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>()); + Arg = TemplateArgument(Name, std::optional<unsigned>()); else - Arg = TemplateArgument(TemplateName(TTP)); + Arg = TemplateArgument(Name); } if (Param->isTemplateParameterPack()) @@ -5020,7 +5623,7 @@ ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, } QualType ASTContext::getPackExpansionType(QualType Pattern, - Optional<unsigned> NumExpansions, + std::optional<unsigned> NumExpansions, bool ExpectPackInType) { assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && "Pack expansions must expand one or more parameter packs"); @@ -5043,7 +5646,7 @@ QualType ASTContext::getPackExpansionType(QualType Pattern, PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); } - T = new (*this, TypeAlignment) + T = new (*this, alignof(PackExpansionType)) PackExpansionType(Pattern, Canon, NumExpansions); Types.push_back(T); PackExpansionTypes.InsertNode(T, InsertPos); @@ -5088,7 +5691,7 @@ QualType ASTContext::getObjCObjectType(QualType BaseType, ObjCProtocolDecl * const *Protocols, unsigned NumProtocols) const { return getObjCObjectType(BaseType, {}, - llvm::makeArrayRef(Protocols, NumProtocols), + llvm::ArrayRef(Protocols, NumProtocols), /*isKindOf=*/false); } @@ -5123,11 +5726,8 @@ QualType ASTContext::getObjCObjectType( // sorted-and-uniqued list of protocols and the type arguments // canonicalized. 
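getPackExpansionType above (now taking std::optional<unsigned> for the expansion count) underlies patterns like the pack expansion in this sketch:

    template <class... Ts> struct type_list {};

    // 'Ts *...' is a PackExpansionType over the pattern 'Ts *'; the number of
    // expansions is usually unknown (std::nullopt) until the pack is
    // substituted.
    template <class... Ts> using pointer_list = type_list<Ts *...>;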
QualType canonical; - bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(), - effectiveTypeArgs.end(), - [&](QualType type) { - return type.isCanonical(); - }); + bool typeArgsAreCanonical = llvm::all_of( + effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); bool protocolsSorted = areSortedAndUniqued(protocols); if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { // Determine the canonical type arguments. @@ -5162,7 +5762,7 @@ QualType ASTContext::getObjCObjectType( unsigned size = sizeof(ObjCObjectTypeImpl); size += typeArgs.size() * sizeof(QualType); size += protocols.size() * sizeof(ObjCProtocolDecl *); - void *mem = Allocate(size, TypeAlignment); + void *mem = Allocate(size, alignof(ObjCObjectTypeImpl)); auto *T = new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, isKindOf); @@ -5269,7 +5869,7 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, unsigned size = sizeof(ObjCTypeParamType); size += protocols.size() * sizeof(ObjCProtocolDecl *); - void *mem = Allocate(size, TypeAlignment); + void *mem = Allocate(size, alignof(ObjCTypeParamType)); auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); Types.push_back(newType); @@ -5375,7 +5975,8 @@ QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { } // No match. - void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); + void *Mem = + Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType)); auto *QType = new (Mem) ObjCObjectPointerType(Canonical, ObjectT); @@ -5401,7 +6002,7 @@ QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) Decl = Def; - void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); + void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType)); auto *T = new (Mem) ObjCInterfaceType(Decl); Decl->TypeForDecl = T; Types.push_back(T); @@ -5413,30 +6014,32 @@ QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, /// multiple declarations that refer to "typeof(x)" all contain different /// DeclRefExpr's. This doesn't effect the type checker, since it operates /// on canonical type's (which are always unique). -QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { +QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { TypeOfExprType *toe; if (tofExpr->isTypeDependent()) { llvm::FoldingSetNodeID ID; - DependentTypeOfExprType::Profile(ID, *this, tofExpr); + DependentTypeOfExprType::Profile(ID, *this, tofExpr, + Kind == TypeOfKind::Unqualified); void *InsertPos = nullptr; - DependentTypeOfExprType *Canon - = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); + DependentTypeOfExprType *Canon = + DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); if (Canon) { // We already have a "canonical" version of an identical, dependent // typeof(expr) type. Use that as our canonical type. - toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, - QualType((TypeOfExprType*)Canon, 0)); + toe = new (*this, alignof(TypeOfExprType)) TypeOfExprType( + *this, tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); } else { // Build a new, canonical typeof(expr) type. 
- Canon - = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); + Canon = new (*this, alignof(DependentTypeOfExprType)) + DependentTypeOfExprType(*this, tofExpr, Kind); DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); toe = Canon; } } else { QualType Canonical = getCanonicalType(tofExpr->getType()); - toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); + toe = new (*this, alignof(TypeOfExprType)) + TypeOfExprType(*this, tofExpr, Kind, Canonical); } Types.push_back(toe); return QualType(toe, 0); @@ -5447,13 +6050,37 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be /// an issue. This doesn't affect the type checker, since it operates /// on canonical types (which are always unique). -QualType ASTContext::getTypeOfType(QualType tofType) const { +QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { QualType Canonical = getCanonicalType(tofType); - auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); + auto *tot = new (*this, alignof(TypeOfType)) + TypeOfType(*this, tofType, Canonical, Kind); Types.push_back(tot); return QualType(tot, 0); } +/// getReferenceQualifiedType - Given an expr, will return the type for +/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions +/// and class member access into account. +QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { + // C++11 [dcl.type.simple]p4: + // [...] + QualType T = E->getType(); + switch (E->getValueKind()) { + // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the + // type of e; + case VK_XValue: + return getRValueReferenceType(T); + // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the + // type of e; + case VK_LValue: + return getLValueReferenceType(T); + // - otherwise, decltype(e) is the type of e. + case VK_PRValue: + return T; + } + llvm_unreachable("Unknown value kind"); +} + /// Unlike many "get<Type>" functions, we don't unique DecltypeType /// nodes. This would never be helpful, since each such type has its own /// expression, and would not give a significant memory saving, since there @@ -5474,19 +6101,53 @@ QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); if (!Canon) { // Build a new, canonical decltype(expr) type. 
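The new getReferenceQualifiedType above implements the value-category part of [dcl.type.simple]p4 (xvalue yields T&&, lvalue yields T&, prvalue yields T). The mapping matches what decltype produces for non-id expressions, as in:

    #include <type_traits>
    #include <utility>

    int n = 0;
    static_assert(std::is_same_v<decltype((n)), int &>);            // lvalue  -> T&
    static_assert(std::is_same_v<decltype(std::move(n)), int &&>);  // xvalue  -> T&&
    static_assert(std::is_same_v<decltype(n + 1), int>);            // prvalue -> T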
- Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); + Canon = new (*this, alignof(DependentDecltypeType)) + DependentDecltypeType(e, DependentTy); DependentDecltypeTypes.InsertNode(Canon, InsertPos); } - dt = new (*this, TypeAlignment) + dt = new (*this, alignof(DecltypeType)) DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); } else { - dt = new (*this, TypeAlignment) + dt = new (*this, alignof(DecltypeType)) DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); } Types.push_back(dt); return QualType(dt, 0); } +QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr, + bool FullySubstituted, + ArrayRef<QualType> Expansions, + int Index) const { + QualType Canonical; + if (FullySubstituted && Index != -1) { + Canonical = getCanonicalType(Expansions[Index]); + } else { + llvm::FoldingSetNodeID ID; + PackIndexingType::Profile(ID, *this, Pattern, IndexExpr); + void *InsertPos = nullptr; + PackIndexingType *Canon = + DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos); + if (!Canon) { + void *Mem = Allocate( + PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()), + TypeAlignment); + Canon = new (Mem) + PackIndexingType(*this, QualType(), Pattern, IndexExpr, Expansions); + DependentPackIndexingTypes.InsertNode(Canon, InsertPos); + } + Canonical = QualType(Canon, 0); + } + + void *Mem = + Allocate(PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()), + TypeAlignment); + auto *T = new (Mem) + PackIndexingType(*this, Canonical, Pattern, IndexExpr, Expansions); + Types.push_back(T); + return QualType(T, 0); +} + /// getUnaryTransformationType - We don't unique these, since the memory /// savings are minimal and these are rare. QualType ASTContext::getUnaryTransformType(QualType BaseType, @@ -5506,33 +6167,25 @@ QualType ASTContext::getUnaryTransformType(QualType BaseType, if (!Canon) { // Build a new, canonical __underlying_type(type) type. - Canon = new (*this, TypeAlignment) - DependentUnaryTransformType(*this, getCanonicalType(BaseType), - Kind); + Canon = new (*this, alignof(DependentUnaryTransformType)) + DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind); DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); } - ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, - QualType(), Kind, - QualType(Canon, 0)); + ut = new (*this, alignof(UnaryTransformType)) + UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0)); } else { QualType CanonType = getCanonicalType(UnderlyingType); - ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, - UnderlyingType, Kind, - CanonType); + ut = new (*this, alignof(UnaryTransformType)) + UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType); } Types.push_back(ut); return QualType(ut, 0); } -/// getAutoType - Return the uniqued reference to the 'auto' type which has been -/// deduced to the given type, or to the canonical undeduced 'auto' type, or the -/// canonical deduced-but-dependent 'auto' type. 
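getPackIndexingType above backs C++26 pack indexing; a minimal sketch, assuming a compiler mode that enables the feature (e.g. -std=c++2c) and an illustrative alias name:

    #include <type_traits>

    // 'Ts...[0]' names the first type of the pack; while the pack or the
    // index is dependent, the selection is modeled by PackIndexingType.
    template <class... Ts>
    using first_of = Ts...[0];

    static_assert(std::is_same_v<first_of<char, int, long>, char>);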
-QualType -ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, - bool IsDependent, bool IsPack, - ConceptDecl *TypeConstraintConcept, - ArrayRef<TemplateArgument> TypeConstraintArgs) const { - assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); +QualType ASTContext::getAutoTypeInternal( + QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, + bool IsPack, ConceptDecl *TypeConstraintConcept, + ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && !TypeConstraintConcept && !IsDependent) return getAutoDeductType(); @@ -5545,21 +6198,77 @@ ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(AT, 0); + QualType Canon; + if (!IsCanon) { + if (!DeducedType.isNull()) { + Canon = DeducedType.getCanonicalType(); + } else if (TypeConstraintConcept) { + bool AnyNonCanonArgs = false; + ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl(); + auto CanonicalConceptArgs = ::getCanonicalTemplateArguments( + *this, TypeConstraintArgs, AnyNonCanonArgs); + if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) { + Canon = + getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, + CanonicalConcept, CanonicalConceptArgs, true); + // Find the insert position again. + [[maybe_unused]] auto *Nothing = + AutoTypes.FindNodeOrInsertPos(ID, InsertPos); + assert(!Nothing && "canonical type broken"); + } + } + } + void *Mem = Allocate(sizeof(AutoType) + - sizeof(TemplateArgument) * TypeConstraintArgs.size(), - TypeAlignment); + sizeof(TemplateArgument) * TypeConstraintArgs.size(), + alignof(AutoType)); auto *AT = new (Mem) AutoType( DeducedType, Keyword, (IsDependent ? TypeDependence::DependentInstantiation : TypeDependence::None) | (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), - TypeConstraintConcept, TypeConstraintArgs); + Canon, TypeConstraintConcept, TypeConstraintArgs); Types.push_back(AT); - if (InsertPos) - AutoTypes.InsertNode(AT, InsertPos); + AutoTypes.InsertNode(AT, InsertPos); return QualType(AT, 0); } +/// getAutoType - Return the uniqued reference to the 'auto' type which has been +/// deduced to the given type, or to the canonical undeduced 'auto' type, or the +/// canonical deduced-but-dependent 'auto' type. +QualType +ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, + bool IsDependent, bool IsPack, + ConceptDecl *TypeConstraintConcept, + ArrayRef<TemplateArgument> TypeConstraintArgs) const { + assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); + assert((!IsDependent || DeducedType.isNull()) && + "A dependent auto should be undeduced"); + return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, + TypeConstraintConcept, TypeConstraintArgs); +} + +QualType ASTContext::getUnconstrainedType(QualType T) const { + QualType CanonT = T.getCanonicalType(); + + // Remove a type-constraint from a top-level auto or decltype(auto). + if (auto *AT = CanonT->getAs<AutoType>()) { + if (!AT->isConstrained()) + return T; + return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), + AT->isDependentType(), + AT->containsUnexpandedParameterPack()), + T.getQualifiers()); + } + + // FIXME: We only support constrained auto at the top level in the type of a + // non-type template parameter at the moment. 
Once we lift that restriction, + // we'll need to recursively build types containing auto here. + assert(!CanonT->getContainedAutoType() || + !CanonT->getContainedAutoType()->isConstrained()); + return T; +} + /// Return the uniqued reference to the deduced template specialization type /// which has been deduced to the given type, or to the canonical undeduced /// such type, or the canonical deduced-but-dependent such type. @@ -5574,11 +6283,13 @@ QualType ASTContext::getDeducedTemplateSpecializationType( DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(DTST, 0); - auto *DTST = new (*this, TypeAlignment) + auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType)) DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); + llvm::FoldingSetNodeID TempID; + DTST->Profile(TempID); + assert(ID == TempID && "ID does not match"); Types.push_back(DTST); - if (InsertPos) - DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); + DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); return QualType(DTST, 0); } @@ -5604,7 +6315,7 @@ QualType ASTContext::getAtomicType(QualType T) const { AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; } - auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); + auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical); Types.push_back(New); AtomicTypes.InsertNode(New, InsertPos); return QualType(New, 0); @@ -5613,9 +6324,9 @@ QualType ASTContext::getAtomicType(QualType T) const { /// getAutoDeductType - Get type pattern for deducing against 'auto'. QualType ASTContext::getAutoDeductType() const { if (AutoDeductTy.isNull()) - AutoDeductTy = QualType(new (*this, TypeAlignment) + AutoDeductTy = QualType(new (*this, alignof(AutoType)) AutoType(QualType(), AutoTypeKeyword::Auto, - TypeDependence::None, + TypeDependence::None, QualType(), /*concept*/ nullptr, /*args*/ {}), 0); return AutoDeductTy; @@ -5686,14 +6397,14 @@ QualType ASTContext::getUIntPtrType() const { /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). QualType ASTContext::getPointerDiffType() const { - return getFromTargetType(Target->getPtrDiffType(0)); + return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); } /// Return the unique unsigned counterpart of "ptrdiff_t" /// integer type. The standard (C11 7.21.6.1p7) refers to this type /// in the definition of %tu format specifier. 
QualType ASTContext::getUnsignedPointerDiffType() const { - return getFromTargetType(Target->getUnsignedPtrDiffType(0)); + return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); } /// Return the unique type for "pid_t" defined in @@ -5713,7 +6424,9 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const { T = getVariableArrayDecayedType(T); const Type *Ty = T.getTypePtr(); QualType Result; - if (isa<ArrayType>(Ty)) { + if (getLangOpts().HLSL && isa<ConstantArrayType>(Ty)) { + Result = getArrayParameterType(QualType(Ty, 0)); + } else if (isa<ArrayType>(Ty)) { Result = getArrayDecayedType(QualType(Ty,0)); } else if (isa<FunctionType>(Ty)) { Result = getPointerType(QualType(Ty, 0)); @@ -5725,7 +6438,7 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const { } QualType ASTContext::getUnqualifiedArrayType(QualType type, - Qualifiers &quals) { + Qualifiers &quals) const { SplitQualType splitType = type.getSplitUnqualifiedType(); // FIXME: getSplitUnqualifiedType() actually walks all the way to @@ -5783,7 +6496,11 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type, /// Attempt to unwrap two types that may both be array types with the same bound /// (or both be array types of unknown bound) for the purpose of comparing the /// cv-decomposition of two types per C++ [conv.qual]. -void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) { +/// +/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in +/// C++20 [conv.qual], if permitted by the current language mode. +void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, + bool AllowPiMismatch) { while (true) { auto *AT1 = getAsArrayType(T1); if (!AT1) @@ -5795,12 +6512,21 @@ void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) { // If we don't have two array types with the same constant bound nor two // incomplete array types, we've unwrapped everything we can. + // C++20 also permits one type to be a constant array type and the other + // to be an incomplete array type. + // FIXME: Consider also unwrapping array of unknown bound and VLA. if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); - if (!CAT2 || CAT1->getSize() != CAT2->getSize()) + if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || + (AllowPiMismatch && getLangOpts().CPlusPlus20 && + isa<IncompleteArrayType>(AT2)))) + return; + } else if (isa<IncompleteArrayType>(AT1)) { + if (!(isa<IncompleteArrayType>(AT2) || + (AllowPiMismatch && getLangOpts().CPlusPlus20 && + isa<ConstantArrayType>(AT2)))) return; - } else if (!isa<IncompleteArrayType>(AT1) || - !isa<IncompleteArrayType>(AT2)) { + } else { return; } @@ -5819,10 +6545,14 @@ void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) { /// "unwraps" pointer and pointer-to-member types to compare them at each /// level. /// +/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in +/// C++20 [conv.qual], if permitted by the current language mode. +/// /// \return \c true if a pointer type was unwrapped, \c false if we reached a /// pair of types that can't be unwrapped further. 
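The AllowPiMismatch parameter above implements the C++20 [conv.qual] relaxation that lets an array of known bound pair up with an array of unknown bound during a qualification conversion, for example:

    void qual_conv_example() {
      int arr[3];
      int (*p)[3] = &arr;
      // OK in C++20: the bound may drop out while qualifiers are added, which
      // is exactly the Pi mismatch UnwrapSimilarArrayTypes now tolerates.
      const int (*q)[] = p;
      (void)q;
    }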
-bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) { - UnwrapSimilarArrayTypes(T1, T2); +bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, + bool AllowPiMismatch) { + UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); const auto *T1PtrType = T1->getAs<PointerType>(); const auto *T2PtrType = T2->getAs<PointerType>(); @@ -5883,7 +6613,7 @@ bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { if (hasSameType(T1, T2)) return true; - if (!UnwrapSimilarTypes(T1, T2)) + if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) return false; } } @@ -5937,13 +6667,18 @@ ASTContext::getNameForTemplate(TemplateName Name, return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), NameLoc); } + case TemplateName::UsingTemplate: + return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), + NameLoc); } llvm_unreachable("bad template name kind!"); } -TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { +TemplateName +ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { switch (Name.getKind()) { + case TemplateName::UsingTemplate: case TemplateName::QualifiedTemplate: case TemplateName::Template: { TemplateDecl *Template = Name.getAsTemplateDecl(); @@ -5971,23 +6706,475 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { } case TemplateName::SubstTemplateTemplateParmPack: { - SubstTemplateTemplateParmPackStorage *subst - = Name.getAsSubstTemplateTemplateParmPack(); - TemplateTemplateParmDecl *canonParameter - = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); - TemplateArgument canonArgPack - = getCanonicalTemplateArgument(subst->getArgumentPack()); - return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); + SubstTemplateTemplateParmPackStorage *subst = + Name.getAsSubstTemplateTemplateParmPack(); + TemplateArgument canonArgPack = + getCanonicalTemplateArgument(subst->getArgumentPack()); + return getSubstTemplateTemplateParmPack( + canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), + subst->getFinal(), subst->getIndex()); } } llvm_unreachable("bad template name!"); } -bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { - X = getCanonicalTemplateName(X); - Y = getCanonicalTemplateName(Y); - return X.getAsVoidPointer() == Y.getAsVoidPointer(); +bool ASTContext::hasSameTemplateName(const TemplateName &X, + const TemplateName &Y) const { + return getCanonicalTemplateName(X).getAsVoidPointer() == + getCanonicalTemplateName(Y).getAsVoidPointer(); +} + +bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { + if (!XCE != !YCE) + return false; + + if (!XCE) + return true; + + llvm::FoldingSetNodeID XCEID, YCEID; + XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); + YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); + return XCEID == YCEID; +} + +bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, + const TypeConstraint *YTC) const { + if (!XTC != !YTC) + return false; + + if (!XTC) + return true; + + auto *NCX = XTC->getNamedConcept(); + auto *NCY = YTC->getNamedConcept(); + if (!NCX || !NCY || !isSameEntity(NCX, NCY)) + return false; + if (XTC->getConceptReference()->hasExplicitTemplateArgs() != + YTC->getConceptReference()->hasExplicitTemplateArgs()) + return false; + if (XTC->getConceptReference()->hasExplicitTemplateArgs()) + if (XTC->getConceptReference() + ->getTemplateArgsAsWritten() + ->NumTemplateArgs != + 
YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs) + return false; + + // Compare slowly by profiling. + // + // We couldn't compare the profiling result for the template + // args here. Consider the following example in different modules: + // + // template <__integer_like _Tp, C<_Tp> Sentinel> + // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { + // return __t; + // } + // + // When we compare the profiling result for `C<_Tp>` in different + // modules, it will compare the type of `_Tp` in different modules. + // However, the type of `_Tp` in different modules refer to different + // types here naturally. So we couldn't compare the profiling result + // for the template args directly. + return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), + YTC->getImmediatelyDeclaredConstraint()); +} + +bool ASTContext::isSameTemplateParameter(const NamedDecl *X, + const NamedDecl *Y) const { + if (X->getKind() != Y->getKind()) + return false; + + if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { + auto *TY = cast<TemplateTypeParmDecl>(Y); + if (TX->isParameterPack() != TY->isParameterPack()) + return false; + if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) + return false; + return isSameTypeConstraint(TX->getTypeConstraint(), + TY->getTypeConstraint()); + } + + if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { + auto *TY = cast<NonTypeTemplateParmDecl>(Y); + return TX->isParameterPack() == TY->isParameterPack() && + TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && + isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), + TY->getPlaceholderTypeConstraint()); + } + + auto *TX = cast<TemplateTemplateParmDecl>(X); + auto *TY = cast<TemplateTemplateParmDecl>(Y); + return TX->isParameterPack() == TY->isParameterPack() && + isSameTemplateParameterList(TX->getTemplateParameters(), + TY->getTemplateParameters()); +} + +bool ASTContext::isSameTemplateParameterList( + const TemplateParameterList *X, const TemplateParameterList *Y) const { + if (X->size() != Y->size()) + return false; + + for (unsigned I = 0, N = X->size(); I != N; ++I) + if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) + return false; + + return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); +} + +bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, + const NamedDecl *Y) const { + // If the type parameter isn't the same already, we don't need to check the + // default argument further. 
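isSameTemplateParameter above compares parameters structurally, including any written type-constraint (profiled rather than resolved, per the comment about modules). For example, a hypothetical cross-module pair like the following is treated as declaring the same template:

    #include <concepts>

    template <std::copyable T> struct holder;            // e.g. seen in module A
    template <std::copyable T> struct holder { T v; };   // e.g. seen in module B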
+ if (!isSameTemplateParameter(X, Y)) + return false; + + if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { + auto *TTPY = cast<TemplateTypeParmDecl>(Y); + if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) + return false; + + return hasSameType(TTPX->getDefaultArgument().getArgument().getAsType(), + TTPY->getDefaultArgument().getArgument().getAsType()); + } + + if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { + auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); + if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) + return false; + + Expr *DefaultArgumentX = + NTTPX->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts(); + Expr *DefaultArgumentY = + NTTPY->getDefaultArgument().getArgument().getAsExpr()->IgnoreImpCasts(); + llvm::FoldingSetNodeID XID, YID; + DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); + DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); + return XID == YID; + } + + auto *TTPX = cast<TemplateTemplateParmDecl>(X); + auto *TTPY = cast<TemplateTemplateParmDecl>(Y); + + if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) + return false; + + const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); + const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); + return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); +} + +static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { + if (auto *NS = X->getAsNamespace()) + return NS; + if (auto *NAS = X->getAsNamespaceAlias()) + return NAS->getNamespace(); + return nullptr; +} + +static bool isSameQualifier(const NestedNameSpecifier *X, + const NestedNameSpecifier *Y) { + if (auto *NSX = getNamespace(X)) { + auto *NSY = getNamespace(Y); + if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) + return false; + } else if (X->getKind() != Y->getKind()) + return false; + + // FIXME: For namespaces and types, we're permitted to check that the entity + // is named via the same tokens. We should probably do so. + switch (X->getKind()) { + case NestedNameSpecifier::Identifier: + if (X->getAsIdentifier() != Y->getAsIdentifier()) + return false; + break; + case NestedNameSpecifier::Namespace: + case NestedNameSpecifier::NamespaceAlias: + // We've already checked that we named the same namespace. + break; + case NestedNameSpecifier::TypeSpec: + case NestedNameSpecifier::TypeSpecWithTemplate: + if (X->getAsType()->getCanonicalTypeInternal() != + Y->getAsType()->getCanonicalTypeInternal()) + return false; + break; + case NestedNameSpecifier::Global: + case NestedNameSpecifier::Super: + return true; + } + + // Recurse into earlier portion of NNS, if any. + auto *PX = X->getPrefix(); + auto *PY = Y->getPrefix(); + if (PX && PY) + return isSameQualifier(PX, PY); + return !PX && !PY; +} + +/// Determine whether the attributes we can overload on are identical for A and +/// B. Will ignore any overloadable attrs represented in the type of A and B. +static bool hasSameOverloadableAttrs(const FunctionDecl *A, + const FunctionDecl *B) { + // Note that pass_object_size attributes are represented in the function's + // ExtParameterInfo, so we don't need to check them here. 
+ + llvm::FoldingSetNodeID Cand1ID, Cand2ID; + auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); + auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); + + for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { + std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); + std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); + + // Return false if the number of enable_if attributes is different. + if (!Cand1A || !Cand2A) + return false; + + Cand1ID.clear(); + Cand2ID.clear(); + + (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); + (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); + + // Return false if any of the enable_if expressions of A and B are + // different. + if (Cand1ID != Cand2ID) + return false; + } + return true; +} + +bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { + // Caution: this function is called by the AST reader during deserialization, + // so it cannot rely on AST invariants being met. Non-trivial accessors + // should be avoided, along with any traversal of redeclaration chains. + + if (X == Y) + return true; + + if (X->getDeclName() != Y->getDeclName()) + return false; + + // Must be in the same context. + // + // Note that we can't use DeclContext::Equals here, because the DeclContexts + // could be two different declarations of the same function. (We will fix the + // semantic DC to refer to the primary definition after merging.) + if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), + cast<Decl>(Y->getDeclContext()->getRedeclContext()))) + return false; + + // Two typedefs refer to the same entity if they have the same underlying + // type. + if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) + if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) + return hasSameType(TypedefX->getUnderlyingType(), + TypedefY->getUnderlyingType()); + + // Must have the same kind. + if (X->getKind() != Y->getKind()) + return false; + + // Objective-C classes and protocols with the same name always match. + if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) + return true; + + if (isa<ClassTemplateSpecializationDecl>(X)) { + // No need to handle these here: we merge them when adding them to the + // template. + return false; + } + + // Compatible tags match. + if (const auto *TagX = dyn_cast<TagDecl>(X)) { + const auto *TagY = cast<TagDecl>(Y); + return (TagX->getTagKind() == TagY->getTagKind()) || + ((TagX->getTagKind() == TagTypeKind::Struct || + TagX->getTagKind() == TagTypeKind::Class || + TagX->getTagKind() == TagTypeKind::Interface) && + (TagY->getTagKind() == TagTypeKind::Struct || + TagY->getTagKind() == TagTypeKind::Class || + TagY->getTagKind() == TagTypeKind::Interface)); + } + + // Functions with the same type and linkage match. + // FIXME: This needs to cope with merging of prototyped/non-prototyped + // functions, etc. + if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) { + const auto *FuncY = cast<FunctionDecl>(Y); + if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) { + const auto *CtorY = cast<CXXConstructorDecl>(Y); + if (CtorX->getInheritedConstructor() && + !isSameEntity(CtorX->getInheritedConstructor().getConstructor(), + CtorY->getInheritedConstructor().getConstructor())) + return false; + } + + if (FuncX->isMultiVersion() != FuncY->isMultiVersion()) + return false; + + // Multiversioned functions with different feature strings are represented + // as separate declarations. 
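An illustrative aside, not part of the patch (hypothetical Widget/Size32 names): the typedef and tag cases above are deliberately forgiving, so declarations that differ only in spelling still merge into one entity.

struct Widget;             // forward declaration with 'struct'
class Widget {             // same entity: struct/class/__interface tag kinds are compatible
  int X = 0;
};

typedef unsigned Size32;   // typedef-names match on the underlying type alone,
using Size32 = unsigned;   // so this alias-declaration redeclares the same entity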
+    if (FuncX->isMultiVersion()) {
+      const auto *TAX = FuncX->getAttr<TargetAttr>();
+      const auto *TAY = FuncY->getAttr<TargetAttr>();
+      assert(TAX && TAY && "Multiversion Function without target attribute");
+
+      if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
+        return false;
+    }
+
+    // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
+    // not the same entity if they are constrained.
+    if ((FuncX->isMemberLikeConstrainedFriend() ||
+         FuncY->isMemberLikeConstrainedFriend()) &&
+        !FuncX->getLexicalDeclContext()->Equals(
+            FuncY->getLexicalDeclContext())) {
+      return false;
+    }
+
+    if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
+                              FuncY->getTrailingRequiresClause()))
+      return false;
+
+    auto GetTypeAsWritten = [](const FunctionDecl *FD) {
+      // Map to the first declaration that we've already merged into this one.
+      // The TSI of redeclarations might not match (due to calling conventions
+      // being inherited onto the type but not the TSI), but the TSI type of
+      // the first declaration of the function should match across modules.
+      FD = FD->getCanonicalDecl();
+      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
+                                     : FD->getType();
+    };
+    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
+    if (!hasSameType(XT, YT)) {
+      // We can get functions with different types on the redecl chain in C++17
+      // if they have differing exception specifications and at least one of
+      // the exception specs is unresolved.
+      auto *XFPT = XT->getAs<FunctionProtoType>();
+      auto *YFPT = YT->getAs<FunctionProtoType>();
+      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
+          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
+           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
+          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
+        return true;
+      return false;
+    }
+
+    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
+           hasSameOverloadableAttrs(FuncX, FuncY);
+  }
+
+  // Variables with the same type and linkage match.
+  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
+    const auto *VarY = cast<VarDecl>(Y);
+    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
+      // During deserialization, we might compare variables before we load
+      // their types. Assume the types will end up being the same.
+      if (VarX->getType().isNull() || VarY->getType().isNull())
+        return true;
+
+      if (hasSameType(VarX->getType(), VarY->getType()))
+        return true;
+
+      // We can get decls with different types on the redecl chain. Eg.
+      // template <typename T> struct S { static T Var[]; }; // #1
+      // template <typename T> T S<T>::Var[sizeof(T)]; // #2
+      // This should only happen when completing an incomplete array type. In
+      // this case, when comparing #1 and #2, we should go through their
+      // element type.
+      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
+      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
+      if (!VarXTy || !VarYTy)
+        return false;
+      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
+        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
+    }
+    return false;
+  }
+
+  // Namespaces with the same name and inlinedness match.
+  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
+    const auto *NamespaceY = cast<NamespaceDecl>(Y);
+    return NamespaceX->isInline() == NamespaceY->isInline();
+  }
+
+  // Identical template names and kinds match if their template parameter lists
+  // and patterns match.
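An illustrative aside for the VarDecl case a few lines above, fleshing out its #1/#2 comment (not part of the patch): the two declarations carry different array types, and the element-type fallback is what lets them be recognized as one variable.

template <typename T> struct S { static T Var[]; };  // #1: incomplete array type T[]
template <typename T> T S<T>::Var[sizeof(T)];         // #2: completed type T[sizeof(T)]

int touch() { return S<int>::Var[0]; }  // instantiates #2; #1 and #2 must merge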
+ if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) { + const auto *TemplateY = cast<TemplateDecl>(Y); + + // ConceptDecl wouldn't be the same if their constraint expression differs. + if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { + const auto *ConceptY = cast<ConceptDecl>(Y); + if (!isSameConstraintExpr(ConceptX->getConstraintExpr(), + ConceptY->getConstraintExpr())) + return false; + } + + return isSameEntity(TemplateX->getTemplatedDecl(), + TemplateY->getTemplatedDecl()) && + isSameTemplateParameterList(TemplateX->getTemplateParameters(), + TemplateY->getTemplateParameters()); + } + + // Fields with the same name and the same type match. + if (const auto *FDX = dyn_cast<FieldDecl>(X)) { + const auto *FDY = cast<FieldDecl>(Y); + // FIXME: Also check the bitwidth is odr-equivalent, if any. + return hasSameType(FDX->getType(), FDY->getType()); + } + + // Indirect fields with the same target field match. + if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { + const auto *IFDY = cast<IndirectFieldDecl>(Y); + return IFDX->getAnonField()->getCanonicalDecl() == + IFDY->getAnonField()->getCanonicalDecl(); + } + + // Enumerators with the same name match. + if (isa<EnumConstantDecl>(X)) + // FIXME: Also check the value is odr-equivalent. + return true; + + // Using shadow declarations with the same target match. + if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { + const auto *USY = cast<UsingShadowDecl>(Y); + return declaresSameEntity(USX->getTargetDecl(), USY->getTargetDecl()); + } + + // Using declarations with the same qualifier match. (We already know that + // the name matches.) + if (const auto *UX = dyn_cast<UsingDecl>(X)) { + const auto *UY = cast<UsingDecl>(Y); + return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && + UX->hasTypename() == UY->hasTypename() && + UX->isAccessDeclaration() == UY->isAccessDeclaration(); + } + if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { + const auto *UY = cast<UnresolvedUsingValueDecl>(Y); + return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && + UX->isAccessDeclaration() == UY->isAccessDeclaration(); + } + if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { + return isSameQualifier( + UX->getQualifier(), + cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); + } + + // Using-pack declarations are only created by instantiation, and match if + // they're instantiated from matching UnresolvedUsing...Decls. + if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { + return declaresSameEntity( + UX->getInstantiatedFromUsingDecl(), + cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); + } + + // Namespace alias definitions with the same target match. 
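An illustrative aside for the using-declaration cases above (hypothetical Wrapper template, not part of the patch): besides the qualifier, the comparison distinguishes declarations by whether the 'typename' keyword was used, since that changes what kind of name is introduced.

template <typename T> struct Wrapper : T {
  using typename T::value_type;  // introduces a dependent *type* name
  using T::size;                 // introduces a (possibly overloaded) member name
};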
+ if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { + const auto *NAY = cast<NamespaceAliasDecl>(Y); + return NAX->getNamespace()->Equals(NAY->getNamespace()); + } + + return false; } TemplateArgument @@ -6001,39 +7188,43 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { case TemplateArgument::Declaration: { auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); - return TemplateArgument(D, Arg.getParamTypeForDecl()); + return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()), + Arg.getIsDefaulted()); } case TemplateArgument::NullPtr: return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), - /*isNullPtr*/true); + /*isNullPtr*/ true, Arg.getIsDefaulted()); case TemplateArgument::Template: - return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); + return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()), + Arg.getIsDefaulted()); case TemplateArgument::TemplateExpansion: - return TemplateArgument(getCanonicalTemplateName( - Arg.getAsTemplateOrTemplatePattern()), - Arg.getNumTemplateExpansions()); + return TemplateArgument( + getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()), + Arg.getNumTemplateExpansions(), Arg.getIsDefaulted()); case TemplateArgument::Integral: return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); + case TemplateArgument::StructuralValue: + return TemplateArgument(*this, + getCanonicalType(Arg.getStructuralValueType()), + Arg.getAsStructuralValue()); + case TemplateArgument::Type: - return TemplateArgument(getCanonicalType(Arg.getAsType())); + return TemplateArgument(getCanonicalType(Arg.getAsType()), + /*isNullPtr*/ false, Arg.getIsDefaulted()); case TemplateArgument::Pack: { - if (Arg.pack_size() == 0) + bool AnyNonCanonArgs = false; + auto CanonArgs = ::getCanonicalTemplateArguments( + *this, Arg.pack_elements(), AnyNonCanonArgs); + if (!AnyNonCanonArgs) return Arg; - - auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()]; - unsigned Idx = 0; - for (TemplateArgument::pack_iterator A = Arg.pack_begin(), - AEnd = Arg.pack_end(); - A != AEnd; (void)++A, ++Idx) - CanonArgs[Idx] = getCanonicalTemplateArgument(*A); - - return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size())); + return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), + CanonArgs); } } @@ -6057,14 +7248,14 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { // A namespace is canonical; build a nested-name-specifier with // this namespace and no prefix. return NestedNameSpecifier::Create(*this, nullptr, - NNS->getAsNamespace()->getOriginalNamespace()); + NNS->getAsNamespace()->getFirstDecl()); case NestedNameSpecifier::NamespaceAlias: // A namespace is canonical; build a nested-name-specifier with // this namespace and no prefix. - return NestedNameSpecifier::Create(*this, nullptr, - NNS->getAsNamespaceAlias()->getNamespace() - ->getOriginalNamespace()); + return NestedNameSpecifier::Create( + *this, nullptr, + NNS->getAsNamespaceAlias()->getNamespace()->getFirstDecl()); // The difference between TypeSpec and TypeSpecWithTemplate is that the // latter will have the 'template' keyword when printed. 
@@ -6080,16 +7271,13 @@ ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { // typedef typename T::type T1; // typedef typename T1::type T2; if (const auto *DNT = T->getAs<DependentNameType>()) - return NestedNameSpecifier::Create( - *this, DNT->getQualifier(), - const_cast<IdentifierInfo *>(DNT->getIdentifier())); + return NestedNameSpecifier::Create(*this, DNT->getQualifier(), + DNT->getIdentifier()); if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) - return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, - const_cast<Type *>(T)); + return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, T); // TODO: Set 'Template' parameter to true for other template types. - return NestedNameSpecifier::Create(*this, nullptr, false, - const_cast<Type *>(T)); + return NestedNameSpecifier::Create(*this, nullptr, false, T); } case NestedNameSpecifier::Global: @@ -6160,6 +7348,8 @@ const ArrayType *ASTContext::getAsArrayType(QualType T) const { } QualType ASTContext::getAdjustedParameterType(QualType T) const { + if (getLangOpts().HLSL && T->isConstantArrayType()) + return getArrayParameterType(T); if (T->isArrayType() || T->isFunctionType()) return getDecayedType(T); return T; @@ -6205,7 +7395,7 @@ QualType ASTContext::getArrayDecayedType(QualType Ty) const { PrettyArrayType->getIndexTypeQualifiers()); // int x[_Nullable] -> int * _Nullable - if (auto Nullability = Ty->getNullability(*this)) { + if (auto Nullability = Ty->getNullability()) { Result = const_cast<ASTContext *>(this)->getAttributedType( AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); } @@ -6235,13 +7425,28 @@ uint64_t ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { uint64_t ElementCount = 1; do { - ElementCount *= CA->getSize().getZExtValue(); + ElementCount *= CA->getZExtSize(); CA = dyn_cast_or_null<ConstantArrayType>( CA->getElementType()->getAsArrayTypeUnsafe()); } while (CA); return ElementCount; } +uint64_t ASTContext::getArrayInitLoopExprElementCount( + const ArrayInitLoopExpr *AILE) const { + if (!AILE) + return 0; + + uint64_t ElementCount = 1; + + do { + ElementCount *= AILE->getArraySize().getZExtValue(); + AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); + } while (AILE); + + return ElementCount; +} + /// getFloatingRank - Return a relative rank for floating point types. /// This routine will assert if passed a built-in type that isn't a float. static FloatingRank getFloatingRank(QualType T) { @@ -6257,41 +7462,10 @@ static FloatingRank getFloatingRank(QualType T) { case BuiltinType::LongDouble: return LongDoubleRank; case BuiltinType::Float128: return Float128Rank; case BuiltinType::BFloat16: return BFloat16Rank; + case BuiltinType::Ibm128: return Ibm128Rank; } } -/// getFloatingTypeOfSizeWithinDomain - Returns a real floating -/// point or a complex type (based on typeDomain/typeSize). -/// 'typeDomain' is a real floating point or complex type. -/// 'typeSize' is a real floating point or complex type. 
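An illustrative aside (hypothetical Grid array, not part of the patch): both getConstantArrayElementCount and the new getArrayInitLoopExprElementCount flatten nested constant bounds by multiplying them.

int Grid[2][3][4];
static_assert(sizeof(Grid) / sizeof(int) == 2 * 3 * 4,
              "a nested constant array flattens to 24 elements");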
-QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, - QualType Domain) const { - FloatingRank EltRank = getFloatingRank(Size); - if (Domain->isComplexType()) { - switch (EltRank) { - case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported"); - case Float16Rank: - case HalfRank: llvm_unreachable("Complex half is not supported"); - case FloatRank: return FloatComplexTy; - case DoubleRank: return DoubleComplexTy; - case LongDoubleRank: return LongDoubleComplexTy; - case Float128Rank: return Float128ComplexTy; - } - } - - assert(Domain->isRealFloatingType() && "Unknown domain!"); - switch (EltRank) { - case Float16Rank: return HalfTy; - case BFloat16Rank: return BFloat16Ty; - case HalfRank: return HalfTy; - case FloatRank: return FloatTy; - case DoubleRank: return DoubleTy; - case LongDoubleRank: return LongDoubleTy; - case Float128Rank: return Float128Ty; - } - llvm_unreachable("getFloatingRank(): illegal value for rank"); -} - /// getFloatingTypeOrder - Compare the rank of the two specified floating /// point types, ignoring the domain of the type (i.e. 'double' == /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If @@ -6321,7 +7495,7 @@ unsigned ASTContext::getIntegerRank(const Type *T) const { // Results in this 'losing' to any type of the same size, but winning if // larger. - if (const auto *EIT = dyn_cast<ExtIntType>(T)) + if (const auto *EIT = dyn_cast<BitIntType>(T)) return 0 + (EIT->getNumBits() << 3); switch (cast<BuiltinType>(T)->getKind()) { @@ -6348,6 +7522,21 @@ unsigned ASTContext::getIntegerRank(const Type *T) const { case BuiltinType::Int128: case BuiltinType::UInt128: return 7 + (getIntWidth(Int128Ty) << 3); + + // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of + // their underlying types" [c++20 conv.rank] + case BuiltinType::Char8: + return getIntegerRank(UnsignedCharTy.getTypePtr()); + case BuiltinType::Char16: + return getIntegerRank( + getFromTargetType(Target->getChar16Type()).getTypePtr()); + case BuiltinType::Char32: + return getIntegerRank( + getFromTargetType(Target->getChar32Type()).getTypePtr()); + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + return getIntegerRank( + getFromTargetType(Target->getWCharType()).getTypePtr()); } } @@ -6394,6 +7583,14 @@ QualType ASTContext::isPromotableBitField(Expr *E) const { // We perform that promotion here to match GCC and C++. // FIXME: C does not permit promotion of an enum bit-field whose rank is // greater than that of 'int'. We perform that promotion to match GCC. + // + // C23 6.3.1.1p2: + // The value from a bit-field of a bit-precise integer type is converted to + // the corresponding bit-precise integer type. (The rest is the same as in + // C11.) + if (QualType QT = Field->getType(); QT->isBitIntType()) + return QT; + if (BitWidth < IntSize) return IntTy; @@ -6413,7 +7610,7 @@ QualType ASTContext::isPromotableBitField(Expr *E) const { /// integer type. 
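An illustrative aside for the new [conv.rank] cases above (assumes a typical target where short is 16 bits and int is 32 bits): char8_t, char16_t, char32_t, and wchar_t now take the rank of their underlying types, which is why ordinary integer promotion turns them into int.

#include <type_traits>
static_assert(std::is_same_v<decltype(char16_t{} + char16_t{}), int>,
              "char16_t promotes like its underlying type (uint_least16_t)");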
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { assert(!Promotable.isNull()); - assert(Promotable->isPromotableIntegerType()); + assert(isPromotableIntegerType(Promotable)); if (const auto *ET = Promotable->getAs<EnumType>()) return ET->getDecl()->getPromotionType(); @@ -6433,12 +7630,11 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { uint64_t FromSize = getTypeSize(BT); QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, LongLongTy, UnsignedLongLongTy }; - for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { - uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); + for (const auto &PT : PromoteTypes) { + uint64_t ToSize = getTypeSize(PT); if (FromSize < ToSize || - (FromSize == ToSize && - FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) - return PromoteTypes[Idx]; + (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) + return PT; } llvm_unreachable("char type should fit into long long"); } @@ -6923,7 +8119,7 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { // FIXME: There might(should) be a better way of doing this computation! CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); CharUnits ParmOffset = PtrSize; - for (auto PI : Decl->parameters()) { + for (auto *PI : Decl->parameters()) { QualType PType = PI->getType(); CharUnits sz = getObjCEncodingTypeSize(PType); if (sz.isZero()) @@ -6938,7 +8134,7 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { // Argument types. ParmOffset = PtrSize; - for (auto PVDecl : Decl->parameters()) { + for (auto *PVDecl : Decl->parameters()) { QualType PType = PVDecl->getOriginalType(); if (const auto *AT = dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { @@ -6967,7 +8163,7 @@ ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { getObjCEncodingForType(Decl->getReturnType(), S); CharUnits ParmOffset; // Compute size of all parameters. - for (auto PI : Decl->parameters()) { + for (auto *PI : Decl->parameters()) { QualType PType = PI->getType(); CharUnits sz = getObjCEncodingTypeSize(PType); if (sz.isZero()) @@ -6981,7 +8177,7 @@ ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { ParmOffset = CharUnits::Zero(); // Argument types. - for (auto PVDecl : Decl->parameters()) { + for (auto *PVDecl : Decl->parameters()) { QualType PType = PVDecl->getOriginalType(); if (const auto *AT = dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { @@ -7005,7 +8201,7 @@ ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, QualType T, std::string& S, bool Extended) const { - // Encode type qualifer, 'in', 'inout', etc. for the parameter. + // Encode type qualifier, 'in', 'inout', etc. for the parameter. getObjCEncodingForTypeQualifier(QT, S); // Encode parameter type. ObjCEncOptions Options = ObjCEncOptions() @@ -7113,6 +8309,7 @@ ASTContext::getObjCPropertyImplDeclForPropertyDecl( /// kPropertyWeak = 'W' // 'weak' property /// kPropertyStrong = 'P' // property GC'able /// kPropertyNonAtomic = 'N' // property non-atomic +/// kPropertyOptional = '?' // property optional /// }; /// @endcode std::string @@ -7138,6 +8335,9 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, // closely resembles encoding of ivars. 
getObjCEncodingForPropertyType(PD->getType(), S); + if (PD->isOptional()) + S += ",?"; + if (PD->isReadOnly()) { S += ",R"; if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) @@ -7188,7 +8388,7 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, /// 'l' or 'L' , but not always. For typedefs, we need to use /// 'i' or 'I' instead if encoding a struct field, or a pointer! void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { - if (isa<TypedefType>(PointeeTy.getTypePtr())) { + if (PointeeTy->getAs<TypedefType>()) { if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) PointeeTy = UnsignedIntTy; @@ -7263,6 +8463,7 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C, case BuiltinType::BFloat16: case BuiltinType::Float16: case BuiltinType::Float128: + case BuiltinType::Ibm128: case BuiltinType::Half: case BuiltinType::ShortAccum: case BuiltinType::Accum: @@ -7296,6 +8497,10 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C, #include "clang/Basic/AArch64SVEACLETypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" { DiagnosticsEngine &Diags = C->getDiagnostics(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -7410,7 +8615,7 @@ static bool hasTemplateSpecializationInEncodedString(const Type *T, if (!CXXRD->hasDefinition() || !VisitBasesAndFields) return false; - for (auto B : CXXRD->bases()) + for (const auto &B : CXXRD->bases()) if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), true)) return true; @@ -7475,7 +8680,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, // pointee gets emitted _before_ the '^'. The read-only qualifier of // the pointer itself gets ignored, _unless_ we are looking at a typedef! // Also, do not emit the 'r' for anything but the outermost type! - if (isa<TypedefType>(T.getTypePtr())) { + if (T->getAs<TypedefType>()) { if (Options.IsOutermostType() && T.isConstQualified()) { isReadOnly = true; S += 'r'; @@ -7493,7 +8698,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, // Another legacy compatibility encoding. Some ObjC qualifier and type // combinations need to be rearranged. // Rewrite "in const" from "nr" to "rn" - if (StringRef(S).endswith("nr")) + if (StringRef(S).ends_with("nr")) S.replace(S.end()-2, S.end(), "rn"); } @@ -7553,7 +8758,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, S += '['; if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) - S += llvm::utostr(CAT->getSize().getZExtValue()); + S += llvm::utostr(CAT->getZExtSize()); else { //Variable length arrays are encoded as a regular array with 0 elements. assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && @@ -7657,7 +8862,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, return; } // TODO: Double check to make sure this intentionally falls through. 
- LLVM_FALLTHROUGH; + [[fallthrough]]; } case Type::ObjCInterface: { @@ -7709,7 +8914,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, .setExpandStructures()), FD); if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { - // Note that we do extended encoding of protocol qualifer list + // Note that we do extended encoding of protocol qualifier list // Only when doing ivar or property encoding. S += '"'; for (const auto *I : OPT->quals()) { @@ -7754,14 +8959,19 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, *NotEncodedT = T; return; + case Type::BitInt: + if (NotEncodedT) + *NotEncodedT = T; + return; + // We could see an undeduced auto type here during error recovery. // Just ignore it. case Type::Auto: case Type::DeducedTemplateSpecialization: return; + case Type::ArrayParameter: case Type::Pipe: - case Type::ExtInt: #define ABSTRACT_TYPE(KIND, BASE) #define TYPE(KIND, BASE) #define DEPENDENT_TYPE(KIND, BASE) \ @@ -7803,14 +9013,12 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, } } - unsigned i = 0; for (FieldDecl *Field : RDecl->fields()) { if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) continue; - uint64_t offs = layout.getFieldOffset(i); + uint64_t offs = layout.getFieldOffset(Field->getFieldIndex()); FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), std::make_pair(offs, Field)); - ++i; } if (CXXRec && includeVBases) { @@ -8002,21 +9210,18 @@ static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { static TypedefDecl * CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { + // struct __va_list RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); - // namespace std { struct __va_list { - // Note that we create the namespace even in C. This is intentional so that - // the type is consistent between C and C++, which is important in cases where - // the types need to match between translation units (e.g. with - // -fsanitize=cfi-icall). Ideally we wouldn't have created this namespace at - // all, but it's now part of the ABI (e.g. in mangled names), so we can't - // change it. 
- auto *NS = NamespaceDecl::Create( - const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), - /*Inline*/ false, SourceLocation(), SourceLocation(), - &Context->Idents.get("std"), - /*PrevDecl*/ nullptr); - NS->setImplicit(); - VaListTagDecl->setDeclContext(NS); + if (Context->getLangOpts().CPlusPlus) { + // namespace std { struct __va_list { + auto *NS = NamespaceDecl::Create( + const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), + /*Inline=*/false, SourceLocation(), SourceLocation(), + &Context->Idents.get("std"), + /*PrevDecl=*/nullptr, /*Nested=*/false); + NS->setImplicit(); + VaListTagDecl->setDeclContext(NS); + } VaListTagDecl->startDefinition(); @@ -8123,9 +9328,8 @@ static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { // typedef __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); - QualType VaListTagArrayType - = Context->getConstantArrayType(VaListTagTypedefType, - Size, nullptr, ArrayType::Normal, 0); + QualType VaListTagArrayType = Context->getConstantArrayType( + VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } @@ -8179,7 +9383,7 @@ CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { // typedef struct __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); QualType VaListTagArrayType = Context->getConstantArrayType( - VaListTagType, Size, nullptr, ArrayType::Normal, 0); + VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } @@ -8187,7 +9391,7 @@ static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { // typedef int __builtin_va_list[4]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); QualType IntArrayType = Context->getConstantArrayType( - Context->IntTy, Size, nullptr, ArrayType::Normal, 0); + Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0); return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); } @@ -8200,9 +9404,9 @@ CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { NamespaceDecl *NS; NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), - /*Inline*/false, SourceLocation(), + /*Inline=*/false, SourceLocation(), SourceLocation(), &Context->Idents.get("std"), - /*PrevDecl*/ nullptr); + /*PrevDecl=*/nullptr, /*Nested=*/false); NS->setImplicit(); VaListDecl->setDeclContext(NS); } @@ -8282,7 +9486,7 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { // typedef __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); QualType VaListTagArrayType = Context->getConstantArrayType( - VaListTagType, Size, nullptr, ArrayType::Normal, 0); + VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } @@ -8314,8 +9518,8 @@ static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { FieldDecl *Field = FieldDecl::Create( const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], - /*TInfo=*/0, - /*BitWidth=*/0, + /*TInfo=*/nullptr, + /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); Field->setAccess(AS_public); VaListTagDecl->addDecl(Field); @@ -8333,7 +9537,7 @@ static 
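An illustrative aside (hand-written equivalent, not taken from this hunk): per AAPCS64, the record that CreateAArch64ABIBuiltinVaListDecl builds has the shape sketched below; with this change it is wrapped in namespace std only for C++, so C compilations no longer grow an implicit std namespace.

// Hypothetical C++ spelling of the builtin AArch64 __va_list record.
namespace std {
struct __va_list {
  void *__stack;
  void *__gr_top;
  void *__vr_top;
  int __gr_offs;
  int __vr_offs;
};
} // namespace std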
TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { // typedef __va_list_tag __builtin_va_list[1]; llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); QualType VaListTagArrayType = Context->getConstantArrayType( - VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); + VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); } @@ -8390,6 +9594,10 @@ TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { } bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { + // Allow redecl custom type checking builtin for HLSL. + if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && + BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) + return true; return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); } @@ -8434,11 +9642,11 @@ TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { /// Retrieve the template name that represents a qualified /// template name such as \c std::vector. -TemplateName -ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, - bool TemplateKeyword, - TemplateDecl *Template) const { - assert(NNS && "Missing nested-name-specifier in qualified template name"); +TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, + bool TemplateKeyword, + TemplateName Template) const { + assert(Template.getKind() == TemplateName::Template || + Template.getKind() == TemplateName::UsingTemplate); // FIXME: Canonicalization? llvm::FoldingSetNodeID ID; @@ -8529,18 +9737,20 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, return TemplateName(QTN); } -TemplateName -ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, - TemplateName replacement) const { +TemplateName ASTContext::getSubstTemplateTemplateParm( + TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, + std::optional<unsigned> PackIndex) const { llvm::FoldingSetNodeID ID; - SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); + SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, + Index, PackIndex); void *insertPos = nullptr; SubstTemplateTemplateParmStorage *subst = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); if (!subst) { - subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); + subst = new (*this) SubstTemplateTemplateParmStorage( + Replacement, AssociatedDecl, Index, PackIndex); SubstTemplateTemplateParms.InsertNode(subst, insertPos); } @@ -8548,20 +9758,21 @@ ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, } TemplateName -ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, - const TemplateArgument &ArgPack) const { +ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, + Decl *AssociatedDecl, + unsigned Index, bool Final) const { auto &Self = const_cast<ASTContext &>(*this); llvm::FoldingSetNodeID ID; - SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); + SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack, + AssociatedDecl, Index, Final); void *InsertPos = nullptr; SubstTemplateTemplateParmPackStorage *Subst = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); if (!Subst) { - Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, - ArgPack.pack_size(), - ArgPack.pack_begin()); + Subst = new (*this) SubstTemplateTemplateParmPackStorage( + ArgPack.pack_elements(), 
AssociatedDecl, Index, Final); SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); } @@ -8661,14 +9872,18 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, const auto *Second = SecondVec->castAs<VectorType>(); if (First->getNumElements() == Second->getNumElements() && hasSameType(First->getElementType(), Second->getElementType()) && - First->getVectorKind() != VectorType::AltiVecPixel && - First->getVectorKind() != VectorType::AltiVecBool && - Second->getVectorKind() != VectorType::AltiVecPixel && - Second->getVectorKind() != VectorType::AltiVecBool && - First->getVectorKind() != VectorType::SveFixedLengthDataVector && - First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && - Second->getVectorKind() != VectorType::SveFixedLengthDataVector && - Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) + First->getVectorKind() != VectorKind::AltiVecPixel && + First->getVectorKind() != VectorKind::AltiVecBool && + Second->getVectorKind() != VectorKind::AltiVecPixel && + Second->getVectorKind() != VectorKind::AltiVecBool && + First->getVectorKind() != VectorKind::SveFixedLengthData && + First->getVectorKind() != VectorKind::SveFixedLengthPredicate && + Second->getVectorKind() != VectorKind::SveFixedLengthData && + Second->getVectorKind() != VectorKind::SveFixedLengthPredicate && + First->getVectorKind() != VectorKind::RVVFixedLengthData && + Second->getVectorKind() != VectorKind::RVVFixedLengthData && + First->getVectorKind() != VectorKind::RVVFixedLengthMask && + Second->getVectorKind() != VectorKind::RVVFixedLengthMask) return true; return false; @@ -8676,29 +9891,26 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, /// getSVETypeSize - Return SVE vector or predicate register size. static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { - assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); - return Ty->getKind() == BuiltinType::SveBool - ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth() - : Context.getLangOpts().ArmSveVectorBits; + assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type"); + if (Ty->getKind() == BuiltinType::SveBool || + Ty->getKind() == BuiltinType::SveCount) + return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth(); + return Context.getLangOpts().VScaleMin * 128; } bool ASTContext::areCompatibleSveTypes(QualType FirstType, QualType SecondType) { - assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || - (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && - "Expected SVE builtin type and vector type!"); - auto IsValidCast = [this](QualType FirstType, QualType SecondType) { if (const auto *BT = FirstType->getAs<BuiltinType>()) { if (const auto *VT = SecondType->getAs<VectorType>()) { // Predicates have the same representation as uint8 so we also have to // check the kind to make these types incompatible. 
- if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) + if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) return BT->getKind() == BuiltinType::SveBool; - else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) + else if (VT->getVectorKind() == VectorKind::SveFixedLengthData) return VT->getElementType().getCanonicalType() == FirstType->getSveEltType(*this); - else if (VT->getVectorKind() == VectorType::GenericVector) + else if (VT->getVectorKind() == VectorKind::Generic) return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && hasSameType(VT->getElementType(), getBuiltinVectorTypeInfo(BT).ElementType); @@ -8713,26 +9925,21 @@ bool ASTContext::areCompatibleSveTypes(QualType FirstType, bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, QualType SecondType) { - assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || - (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && - "Expected SVE builtin type and vector type!"); - auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { const auto *BT = FirstType->getAs<BuiltinType>(); if (!BT) return false; const auto *VecTy = SecondType->getAs<VectorType>(); - if (VecTy && - (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || - VecTy->getVectorKind() == VectorType::GenericVector)) { + if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData || + VecTy->getVectorKind() == VectorKind::Generic)) { const LangOptions::LaxVectorConversionKind LVCKind = getLangOpts().getLaxVectorConversions(); // Can not convert between sve predicates and sve vectors because of // different size. if (BT->getKind() == BuiltinType::SveBool && - VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) + VecTy->getVectorKind() == VectorKind::SveFixedLengthData) return false; // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. @@ -8740,7 +9947,7 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, // converts to VLAT and VLAT implicitly converts to GNUT." // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and // predicates. - if (VecTy->getVectorKind() == VectorType::GenericVector && + if (VecTy->getVectorKind() == VectorKind::Generic && getTypeSize(SecondType) != getSVETypeSize(*this, BT)) return false; @@ -8763,6 +9970,97 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, IsLaxCompatible(SecondType, FirstType); } +/// getRVVTypeSize - Return RVV vector register size. 
+static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { + assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type"); + auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts()); + if (!VScale) + return 0; + + ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); + + uint64_t EltSize = Context.getTypeSize(Info.ElementType); + if (Info.ElementType == Context.BoolTy) + EltSize = 1; + + uint64_t MinElts = Info.EC.getKnownMinValue(); + return VScale->first * MinElts * EltSize; +} + +bool ASTContext::areCompatibleRVVTypes(QualType FirstType, + QualType SecondType) { + assert( + ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || + (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && + "Expected RVV builtin type and vector type!"); + + auto IsValidCast = [this](QualType FirstType, QualType SecondType) { + if (const auto *BT = FirstType->getAs<BuiltinType>()) { + if (const auto *VT = SecondType->getAs<VectorType>()) { + if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) { + BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT); + return FirstType->isRVVVLSBuiltinType() && + Info.ElementType == BoolTy && + getTypeSize(SecondType) == getRVVTypeSize(*this, BT); + } + if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || + VT->getVectorKind() == VectorKind::Generic) + return FirstType->isRVVVLSBuiltinType() && + getTypeSize(SecondType) == getRVVTypeSize(*this, BT) && + hasSameType(VT->getElementType(), + getBuiltinVectorTypeInfo(BT).ElementType); + } + } + return false; + }; + + return IsValidCast(FirstType, SecondType) || + IsValidCast(SecondType, FirstType); +} + +bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType, + QualType SecondType) { + assert( + ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || + (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && + "Expected RVV builtin type and vector type!"); + + auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { + const auto *BT = FirstType->getAs<BuiltinType>(); + if (!BT) + return false; + + if (!BT->isRVVVLSBuiltinType()) + return false; + + const auto *VecTy = SecondType->getAs<VectorType>(); + if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) { + const LangOptions::LaxVectorConversionKind LVCKind = + getLangOpts().getLaxVectorConversions(); + + // If __riscv_v_fixed_vlen != N do not allow vector lax conversion. + if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT)) + return false; + + // If -flax-vector-conversions=all is specified, the types are + // certainly compatible. + if (LVCKind == LangOptions::LaxVectorConversionKind::All) + return true; + + // If -flax-vector-conversions=integer is specified, the types are + // compatible if the elements are integer types. + if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) + return VecTy->getElementType().getCanonicalType()->isIntegerType() && + FirstType->getRVVEltType(*this)->isIntegerType(); + } + + return false; + }; + + return IsLaxCompatible(FirstType, SecondType) || + IsLaxCompatible(SecondType, FirstType); +} + bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { while (true) { // __strong id @@ -9078,7 +10376,7 @@ void getIntersectionOfProtocols(ASTContext &Context, llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; // Start with the protocol qualifiers. 
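An illustrative aside for the SVE (and analogous RVV) compatibility checks above (assumes an AArch64 SVE target compiled with -msve-vector-bits, so __ARM_FEATURE_SVE_BITS is defined): a fixed-length vector whose element type and size match the sizeless builtin converts implicitly in both directions, which is what areCompatibleSveTypes sanctions.

#include <arm_sve.h>

#if defined(__ARM_FEATURE_SVE_BITS)
typedef svint32_t fixed_int32_t
    __attribute__((arm_sve_vector_bits(__ARM_FEATURE_SVE_BITS)));

fixed_int32_t to_fixed(svint32_t V) { return V; }    // sizeless -> fixed-length
svint32_t to_sizeless(fixed_int32_t V) { return V; } // fixed-length -> sizeless
#endif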
- for (auto proto : LHS->quals()) { + for (auto *proto : LHS->quals()) { Context.CollectInheritedProtocols(proto, LHSProtocolSet); } @@ -9089,7 +10387,7 @@ void getIntersectionOfProtocols(ASTContext &Context, llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; // Start with the protocol qualifiers. - for (auto proto : RHS->quals()) { + for (auto *proto : RHS->quals()) { Context.CollectInheritedProtocols(proto, RHSProtocolSet); } @@ -9097,7 +10395,7 @@ void getIntersectionOfProtocols(ASTContext &Context, Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); // Compute the intersection of the collected protocol sets. - for (auto proto : LHSProtocolSet) { + for (auto *proto : LHSProtocolSet) { if (RHSProtocolSet.count(proto)) IntersectionSet.push_back(proto); } @@ -9109,13 +10407,9 @@ void getIntersectionOfProtocols(ASTContext &Context, // Remove any implied protocols from the list of inherited protocols. if (!ImpliedProtocols.empty()) { - IntersectionSet.erase( - std::remove_if(IntersectionSet.begin(), - IntersectionSet.end(), - [&](ObjCProtocolDecl *proto) -> bool { - return ImpliedProtocols.count(proto) > 0; - }), - IntersectionSet.end()); + llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { + return ImpliedProtocols.contains(proto); + }); } // Sort the remaining protocols by name. @@ -9157,6 +10451,9 @@ static bool sameObjCTypeArgs(ASTContext &ctx, return false; ObjCTypeParamList *typeParams = iface->getTypeParamList(); + if (!typeParams) + return false; + for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) continue; @@ -9452,7 +10749,8 @@ QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, bool OfBlockPointer, bool Unqualified, - bool AllowCXX) { + bool AllowCXX, + bool IsConditionalOperator) { const auto *lbase = lhs->castAs<FunctionType>(); const auto *rbase = rhs->castAs<FunctionType>(); const auto *lproto = dyn_cast<FunctionProtoType>(lbase); @@ -9515,9 +10813,27 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) return {}; - // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. - bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); - + // When merging declarations, it's common for supplemental information like + // attributes to only be present in one of the declarations, and we generally + // want type merging to preserve the union of information. So a merged + // function type should be noreturn if it was noreturn in *either* operand + // type. + // + // But for the conditional operator, this is backwards. The result of the + // operator could be either operand, and its type should conservatively + // reflect that. So a function type in a composite type is noreturn only + // if it's noreturn in *both* operand types. + // + // Arguably, noreturn is a kind of subtype, and the conditional operator + // ought to produce the most specific common supertype of its operand types. + // That would differ from this rule in contravariant positions. However, + // neither C nor C++ generally uses this kind of subtype reasoning. Also, + // as a practical matter, it would only affect C code that does abstraction of + // higher-order functions (taking noreturn callbacks!), which is uncommon to + // say the least. So we use the simpler rule. + bool NoReturn = IsConditionalOperator + ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() + : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); if (lbaseInfo.getNoReturn() != NoReturn) allLTypes = false; if (rbaseInfo.getNoReturn() != NoReturn) @@ -9525,6 +10841,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); + std::optional<FunctionEffectSet> MergedFX; + if (lproto && rproto) { // two C99 style function prototypes assert((AllowCXX || (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && @@ -9540,6 +10858,25 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, if (lproto->getMethodQuals() != rproto->getMethodQuals()) return {}; + // Function effects are handled similarly to noreturn, see above. + FunctionEffectsRef LHSFX = lproto->getFunctionEffects(); + FunctionEffectsRef RHSFX = rproto->getFunctionEffects(); + if (LHSFX != RHSFX) { + if (IsConditionalOperator) + MergedFX = FunctionEffectSet::getIntersection(LHSFX, RHSFX); + else { + FunctionEffectSet::Conflicts Errs; + MergedFX = FunctionEffectSet::getUnion(LHSFX, RHSFX, Errs); + // Here we're discarding a possible error due to conflicts in the effect + // sets. But we're not in a context where we can report it. The + // operation does however guarantee maintenance of invariants. + } + if (*MergedFX != LHSFX) + allLTypes = false; + if (*MergedFX != RHSFX) + allRTypes = false; + } + SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; bool canUseLeft, canUseRight; if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, @@ -9583,6 +10920,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, EPI.ExtInfo = einfo; EPI.ExtParameterInfos = newParamInfos.empty() ? nullptr : newParamInfos.data(); + if (MergedFX) + EPI.FunctionEffects = *MergedFX; return getFunctionType(retType, types, EPI); } @@ -9610,7 +10949,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, return {}; } - if (paramTy->isPromotableIntegerType() || + if (isPromotableIntegerType(paramTy) || getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) return {}; } @@ -9620,6 +10959,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); EPI.ExtInfo = einfo; + if (MergedFX) + EPI.FunctionEffects = *MergedFX; return getFunctionType(retType, proto->getParamTypes(), EPI); } @@ -9650,9 +10991,9 @@ static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, return {}; } -QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, - bool OfBlockPointer, - bool Unqualified, bool BlockReturnType) { +QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, + bool Unqualified, bool BlockReturnType, + bool IsConditionalOperator) { // For C++ we will not reach this code with reference types (see below), // for OpenMP variant call overloading we might. // @@ -9661,12 +11002,13 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, // designates the object or function denoted by the reference, and the // expression is an lvalue unless the reference is an rvalue reference and // the expression is a function call (possibly inside parentheses). 
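An illustrative aside for the noreturn-merging comment earlier in mergeFunctionTypes (hypothetical fatal/warn functions, not part of the patch): the conditional operator must intersect the attribute because either branch may be chosen, whereas redeclaration merging keeps the union.

[[noreturn]] void fatal(const char *Msg);
void warn(const char *Msg);

void report(bool Die, const char *Msg) {
  // The composite type of (Die ? fatal : warn) must NOT be noreturn:
  // the selected callee may well return, so intersection is the safe merge.
  (Die ? fatal : warn)(Msg);
}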
- if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() && - RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass()) - return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(), - RHS->getAs<ReferenceType>()->getPointeeType(), + auto *LHSRefTy = LHS->getAs<ReferenceType>(); + auto *RHSRefTy = RHS->getAs<ReferenceType>(); + if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && + LHS->getTypeClass() == RHS->getTypeClass()) + return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), OfBlockPointer, Unqualified, BlockReturnType); - if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>()) + if (LHSRefTy || RHSRefTy) return {}; if (Unqualified) { @@ -9755,7 +11097,16 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, if (RHS->isObjCIdType() && LHS->isBlockPointerType()) return RHS; } - + // Allow __auto_type to match anything; it merges to the type with more + // information. + if (const auto *AT = LHS->getAs<AutoType>()) { + if (!AT->isDeduced() && AT->isGNUAutoType()) + return RHS; + } + if (const auto *AT = RHS->getAs<AutoType>()) { + if (!AT->isDeduced() && AT->isGNUAutoType()) + return LHS; + } return {}; } @@ -9858,7 +11209,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, { const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); - if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) + if (LCAT && RCAT && RCAT->getZExtSize() != LCAT->getZExtSize()) return {}; QualType LHSElem = getAsArrayType(LHS)->getElementType(); @@ -9882,7 +11233,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, const ConstantArrayType* CAT) -> std::pair<bool,llvm::APInt> { if (VAT) { - Optional<llvm::APSInt> TheInt; + std::optional<llvm::APSInt> TheInt; Expr *E = VAT->getSizeExpr(); if (E && (TheInt = E->getIntegerConstantExpr(*this))) return std::make_pair(true, *TheInt); @@ -9907,12 +11258,10 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, return RHS; if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(), - LCAT->getSizeExpr(), - ArrayType::ArraySizeModifier(), 0); + LCAT->getSizeExpr(), ArraySizeModifier(), 0); if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(), - RCAT->getSizeExpr(), - ArrayType::ArraySizeModifier(), 0); + RCAT->getSizeExpr(), ArraySizeModifier(), 0); if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) @@ -9931,11 +11280,11 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, } if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; - return getIncompleteArrayType(ResultType, - ArrayType::ArraySizeModifier(), 0); + return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0); } case Type::FunctionNoProto: - return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); + return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, + /*AllowCXX=*/false, IsConditionalOperator); case Type::Record: case Type::Enum: return {}; @@ -9981,14 +11330,18 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, assert(LHS != RHS && "Equivalent pipe types should have already been handled!"); return {}; - case Type::ExtInt: { - // Merge two ext-int types, while trying to preserve typedef info. 
- bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned(); - bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned(); - unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits(); - unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits(); - - // Like unsigned/int, shouldn't have a type if they dont match. + case Type::ArrayParameter: + assert(LHS != RHS && + "Equivalent ArrayParameter types should have already been handled!"); + return {}; + case Type::BitInt: { + // Merge two bit-precise int types, while trying to preserve typedef info. + bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); + bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); + unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); + unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); + + // Like unsigned/int, shouldn't have a type if they don't match. if (LHSUnsigned != RHSUnsigned) return {}; @@ -10136,14 +11489,15 @@ unsigned ASTContext::getIntWidth(QualType T) const { T = ET->getDecl()->getIntegerType(); if (T->isBooleanType()) return 1; - if(const auto *EIT = T->getAs<ExtIntType>()) + if (const auto *EIT = T->getAs<BitIntType>()) return EIT->getNumBits(); // For builtin types, just use the standard type sizing method return (unsigned)getTypeSize(T); } QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { - assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && + assert((T->hasIntegerRepresentation() || T->isEnumeralType() || + T->isFixedPointType()) && "Unexpected type"); // Turn <4 x signed int> -> <4 x unsigned int> @@ -10151,9 +11505,9 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), VTy->getNumElements(), VTy->getVectorKind()); - // For _ExtInt, return an unsigned _ExtInt with same width. - if (const auto *EITy = T->getAs<ExtIntType>()) - return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits()); + // For _BitInt, return an unsigned _BitInt with same width. + if (const auto *EITy = T->getAs<BitIntType>()) + return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); // For enums, get the underlying integer type of the enum, and let the general // integer type signchanging code handle it. @@ -10161,8 +11515,11 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { T = ETy->getDecl()->getIntegerType(); switch (T->castAs<BuiltinType>()->getKind()) { + case BuiltinType::Char_U: + // Plain `char` is mapped to `unsigned char` even if it's already unsigned case BuiltinType::Char_S: case BuiltinType::SChar: + case BuiltinType::Char8: return UnsignedCharTy; case BuiltinType::Short: return UnsignedShortTy; @@ -10176,7 +11533,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { return UnsignedInt128Ty; // wchar_t is special. It is either signed or not, but when it's signed, // there's no matching "unsigned wchar_t". Therefore we return the unsigned - // version of it's underlying type instead. + // version of its underlying type instead. 
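An illustrative aside for getCorrespondingUnsignedType above: the function now tolerates inputs that already have the requested signedness (returning them unchanged) and maps plain char and char8_t explicitly; the library trait make_unsigned mirrors the char mapping.

#include <type_traits>
static_assert(std::is_same_v<std::make_unsigned_t<char>, unsigned char>);
static_assert(std::is_same_v<std::make_unsigned_t<signed char>, unsigned char>);
static_assert(std::is_same_v<std::make_unsigned_t<unsigned char>, unsigned char>,
              "an already-unsigned type maps to itself, as the relaxed assert allows");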
case BuiltinType::WChar_S: return getUnsignedWCharType(); @@ -10205,13 +11562,16 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { case BuiltinType::SatLongFract: return SatUnsignedLongFractTy; default: - llvm_unreachable("Unexpected signed integer or fixed point type"); + assert((T->hasUnsignedIntegerRepresentation() || + T->isUnsignedFixedPointType()) && + "Unexpected signed integer or fixed point type"); + return T; } } QualType ASTContext::getCorrespondingSignedType(QualType T) const { - assert((T->hasUnsignedIntegerRepresentation() || - T->isUnsignedFixedPointType()) && + assert((T->hasIntegerRepresentation() || T->isEnumeralType() || + T->isFixedPointType()) && "Unexpected type"); // Turn <4 x unsigned int> -> <4 x signed int> @@ -10219,9 +11579,9 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const { return getVectorType(getCorrespondingSignedType(VTy->getElementType()), VTy->getNumElements(), VTy->getVectorKind()); - // For _ExtInt, return a signed _ExtInt with same width. - if (const auto *EITy = T->getAs<ExtIntType>()) - return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits()); + // For _BitInt, return a signed _BitInt with same width. + if (const auto *EITy = T->getAs<BitIntType>()) + return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); // For enums, get the underlying integer type of the enum, and let the general // integer type signchanging code handle it. @@ -10229,8 +11589,11 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const { T = ETy->getDecl()->getIntegerType(); switch (T->castAs<BuiltinType>()->getKind()) { + case BuiltinType::Char_S: + // Plain `char` is mapped to `signed char` even if it's already signed case BuiltinType::Char_U: case BuiltinType::UChar: + case BuiltinType::Char8: return SignedCharTy; case BuiltinType::UShort: return ShortTy; @@ -10244,7 +11607,7 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const { return Int128Ty; // wchar_t is special. It is either unsigned or not, but when it's unsigned, // there's no matching "signed wchar_t". Therefore we return the signed - // version of it's underlying type instead. + // version of its underlying type instead. case BuiltinType::WChar_U: return getSignedWCharType(); @@ -10273,7 +11636,10 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const { case BuiltinType::SatULongFract: return SatLongFractTy; default: - llvm_unreachable("Unexpected unsigned integer or fixed point type"); + assert( + (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && + "Unexpected signed integer or fixed point type"); + return T; } } @@ -10515,6 +11881,21 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, Type = Context.getScalableVectorType(ElementType, NumElements); break; } + case 'Q': { + switch (*Str++) { + case 'a': { + Type = Context.SveCountTy; + break; + } + case 'b': { + Type = Context.AMDGPUBufferRsrcTy; + break; + } + default: + llvm_unreachable("Unexpected target builtin type"); + } + break; + } case 'V': { char *End; unsigned NumElements = strtoul(Str, &End, 10); @@ -10526,8 +11907,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, assert(!RequiresICE && "Can't require vector ICE"); // TODO: No way to make AltiVec vectors in builtins yet. 
- Type = Context.getVectorType(ElementType, NumElements, - VectorType::GenericVector); + Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic); break; } case 'E': { @@ -10629,7 +12009,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, } // On some targets such as PowerPC, some of the builtins are defined with custom -// type decriptors for target-dependent types. These descriptors are decoded in +// type descriptors for target-dependent types. These descriptors are decoded in // other functions, but it may be useful to be able to fall back to default // descriptor decoding to define builtins mixing target-dependent and target- // independent types. This function allows decoding one type descriptor with @@ -10692,7 +12072,7 @@ QualType ASTContext::GetBuiltinType(unsigned Id, // We really shouldn't be making a no-proto type here. - if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus) + if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) return getFunctionNoProtoType(ResType, EI); FunctionProtoType::ExtProtoInfo EPI; @@ -10712,9 +12092,8 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, // Non-user-provided functions get emitted as weak definitions with every // use, no matter whether they've been explicitly instantiated etc. - if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) - if (!MD->isUserProvided()) - return GVA_DiscardableODR; + if (!FD->isUserProvided()) + return GVA_DiscardableODR; GVALinkage External; switch (FD->getTemplateSpecializationKind()) { @@ -10764,6 +12143,14 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, if (FD->isMSExternInline()) return GVA_StrongODR; + if (Context.getTargetInfo().getCXXABI().isMicrosoft() && + isa<CXXConstructorDecl>(FD) && + cast<CXXConstructorDecl>(FD)->isInheritingConstructor()) + // Our approach to inheriting constructors is fundamentally different from + // that used by the MS ABI, so keep our inheriting constructor thunks + // internal rather than trying to pick an unambiguous mangling for them. + return GVA_Internal; + return GVA_DiscardableODR; } @@ -10789,7 +12176,7 @@ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, // name between the host and device compilation which is the same for the // same compilation unit whereas different among different compilation // units. - if (Context.shouldExternalizeStaticVar(D)) + if (Context.shouldExternalize(D)) return GVA_StrongExternal; } return L; @@ -10828,6 +12215,16 @@ GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, const VarDecl *VD) { + // As an extension for interactive REPLs, make sure constant variables are + // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl + // marking them as internal. 
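// A hypothetical clang-repl style input illustrating the comment above (an
// editorial sketch, not part of this patch): with IncrementalExtensions
// enabled, a namespace-scope constant such as
//
//   const double Pi = 3.141592653589793;
//
// would ordinarily get internal linkage in C++; the check below instead
// reports GVA_DiscardableODR so the definition is emitted only once across
// incremental inputs.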
+ if (Context.getLangOpts().CPlusPlus && + Context.getLangOpts().IncrementalExtensions && + VD->getType().isConstQualified() && + !VD->getType().isVolatileQualified() && !VD->isInline() && + !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate()) + return GVA_DiscardableODR; + if (!VD->isExternallyVisible()) return GVA_Internal; @@ -10903,7 +12300,7 @@ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, llvm_unreachable("Invalid Linkage!"); } -GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { +GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const { return adjustGVALinkageForExternalDefinitionKind(*this, VD, adjustGVALinkageForAttributes(*this, VD, basicGVALinkageForVariable(*this, VD))); @@ -10995,6 +12392,9 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) { !isMSStaticDataMemberInlineDefinition(VD)) return false; + if (VD->shouldEmitInExternalSource()) + return false; + // Variables that can be needed in other TUs are required. auto Linkage = GetGVALinkageForVariable(VD); if (!isDiscardableGVALinkage(Linkage)) @@ -11038,7 +12438,7 @@ void ASTContext::forEachMultiversionedFunctionVersion( FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && - std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) { + !SeenDecls.contains(CurFD)) { SeenDecls.insert(CurFD); Pred(CurFD); } @@ -11078,6 +12478,10 @@ CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, if (!IsVariadic) return CC_X86RegCall; break; + case LangOptions::DCC_RtdCall: + if (!IsVariadic) + return CC_M68kRTD; + break; } } return Target->getDefaultCallingConv(); @@ -11140,13 +12544,15 @@ MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { case TargetCXXABI::XL: return ItaniumMangleContext::create( *this, getDiagnostics(), - [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> { + [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) return RD->getDeviceLambdaManglingNumber(); - return llvm::None; - }); + return std::nullopt; + }, + /*IsAux=*/true); case TargetCXXABI::Microsoft: - return MicrosoftMangleContext::create(*this, getDiagnostics()); + return MicrosoftMangleContext::create(*this, getDiagnostics(), + /*IsAux=*/true); } llvm_unreachable("Unsupported ABI"); } @@ -11186,19 +12592,23 @@ QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, /// sets floating point QualTy according to specified bitwidth. /// Returns empty type if there is no appropriate target types. 
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, - bool ExplicitIEEE) const { - TargetInfo::RealType Ty = - getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE); + FloatModeKind ExplicitType) const { + FloatModeKind Ty = + getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); switch (Ty) { - case TargetInfo::Float: + case FloatModeKind::Half: + return HalfTy; + case FloatModeKind::Float: return FloatTy; - case TargetInfo::Double: + case FloatModeKind::Double: return DoubleTy; - case TargetInfo::LongDouble: + case FloatModeKind::LongDouble: return LongDoubleTy; - case TargetInfo::Float128: + case FloatModeKind::Float128: return Float128Ty; - case TargetInfo::NoFloat: + case FloatModeKind::Ibm128: + return Ibm128Ty; + case FloatModeKind::NoFloat: return {}; } @@ -11206,18 +12616,38 @@ QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, } void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { - if (Number > 1) - MangleNumbers[ND] = Number; + if (Number <= 1) + return; + + MangleNumbers[ND] = Number; + + if (Listener) + Listener->AddedManglingNumber(ND, Number); } -unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const { +unsigned ASTContext::getManglingNumber(const NamedDecl *ND, + bool ForAuxTarget) const { auto I = MangleNumbers.find(ND); - return I != MangleNumbers.end() ? I->second : 1; + unsigned Res = I != MangleNumbers.end() ? I->second : 1; + // CUDA/HIP host compilation encodes host and device mangling numbers + // as lower and upper half of 32 bit integer. + if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { + Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; + } else { + assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " + "number for aux target"); + } + return Res > 1 ? Res : 1; } void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { - if (Number > 1) - StaticLocalNumbers[VD] = Number; + if (Number <= 1) + return; + + StaticLocalNumbers[VD] = Number; + + if (Listener) + Listener->AddedStaticLocalNumbers(VD, Number); } unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { @@ -11303,7 +12733,7 @@ QualType ASTContext::getStringLiteralArrayType(QualType EltTy, // Get an array type for the string, according to C99 6.4.5. This includes // the null terminator character. 
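// A hypothetical call illustrating the comment above (not part of this
// patch): for the narrow literal "hi", EltTy is 'char' and Length is 2, so
// the returned type is 'char[3]': two characters plus the implicit
// terminating '\0', with ArraySizeModifier::Normal and no index qualifiers.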
return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, - ArrayType::Normal, /*IndexTypeQuals*/ 0); + ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0); } StringLiteral * @@ -11311,7 +12741,7 @@ ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { StringLiteral *&Result = StringLiteralCache[Key]; if (!Result) Result = StringLiteral::Create( - *this, Key, StringLiteral::Ascii, + *this, Key, StringLiteralKind::Ordinary, /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), SourceLocation()); return Result; @@ -11334,6 +12764,23 @@ ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { return New; } +UnnamedGlobalConstantDecl * +ASTContext::getUnnamedGlobalConstantDecl(QualType Ty, + const APValue &APVal) const { + llvm::FoldingSetNodeID ID; + UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal); + + void *InsertPos; + if (UnnamedGlobalConstantDecl *Existing = + UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos)) + return Existing; + + UnnamedGlobalConstantDecl *New = + UnnamedGlobalConstantDecl::Create(*this, Ty, APVal); + UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); + return New; +} + TemplateParamObjectDecl * ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { assert(T->isRecordType() && "template param object of unexpected type"); @@ -11415,10 +12862,978 @@ uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { } unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { - if (isTargetAddressSpace(AS)) - return toTargetAddressSpace(AS); + return getTargetInfo().getTargetAddressSpace(AS); +} + +bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { + if (X == Y) + return true; + if (!X || !Y) + return false; + llvm::FoldingSetNodeID IDX, IDY; + X->Profile(IDX, *this, /*Canonical=*/true); + Y->Profile(IDY, *this, /*Canonical=*/true); + return IDX == IDY; +} + +// The getCommon* helpers return, for given 'same' X and Y entities given as +// inputs, another entity which is also the 'same' as the inputs, but which +// is closer to the canonical form of the inputs, each according to a given +// criteria. +// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of +// the regular ones. + +static Decl *getCommonDecl(Decl *X, Decl *Y) { + if (!declaresSameEntity(X, Y)) + return nullptr; + for (const Decl *DX : X->redecls()) { + // If we reach Y before reaching the first decl, that means X is older. + if (DX == Y) + return X; + // If we reach the first decl, then Y is older. + if (DX->isFirstDecl()) + return Y; + } + llvm_unreachable("Corrupt redecls chain"); +} + +template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> +static T *getCommonDecl(T *X, T *Y) { + return cast_or_null<T>( + getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), + const_cast<Decl *>(cast_or_null<Decl>(Y)))); +} + +template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> +static T *getCommonDeclChecked(T *X, T *Y) { + return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), + const_cast<Decl *>(cast<Decl>(Y)))); +} + +static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, + TemplateName Y) { + if (X.getAsVoidPointer() == Y.getAsVoidPointer()) + return X; + // FIXME: There are cases here where we could find a common template name + // with more sugar. For example one could be a SubstTemplateTemplate* + // replacing the other. 
+ TemplateName CX = Ctx.getCanonicalTemplateName(X); + if (CX.getAsVoidPointer() != + Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) + return TemplateName(); + return CX; +} + +static TemplateName +getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { + TemplateName R = getCommonTemplateName(Ctx, X, Y); + assert(R.getAsVoidPointer() != nullptr); + return R; +} + +static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, + ArrayRef<QualType> Ys, bool Unqualified = false) { + assert(Xs.size() == Ys.size()); + SmallVector<QualType, 8> Rs(Xs.size()); + for (size_t I = 0; I < Rs.size(); ++I) + Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); + return Rs; +} + +template <class T> +static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { + return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() + : SourceLocation(); +} + +static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, + const TemplateArgument &X, + const TemplateArgument &Y) { + if (X.getKind() != Y.getKind()) + return TemplateArgument(); + + switch (X.getKind()) { + case TemplateArgument::ArgKind::Type: + if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) + return TemplateArgument(); + return TemplateArgument( + Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); + case TemplateArgument::ArgKind::NullPtr: + if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) + return TemplateArgument(); + return TemplateArgument( + Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), + /*Unqualified=*/true); + case TemplateArgument::ArgKind::Expression: + if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) + return TemplateArgument(); + // FIXME: Try to keep the common sugar. + return X; + case TemplateArgument::ArgKind::Template: { + TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); + TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); + if (!CTN.getAsVoidPointer()) + return TemplateArgument(); + return TemplateArgument(CTN); + } + case TemplateArgument::ArgKind::TemplateExpansion: { + TemplateName TX = X.getAsTemplateOrTemplatePattern(), + TY = Y.getAsTemplateOrTemplatePattern(); + TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); + if (!CTN.getAsVoidPointer()) + return TemplateName(); + auto NExpX = X.getNumTemplateExpansions(); + assert(NExpX == Y.getNumTemplateExpansions()); + return TemplateArgument(CTN, NExpX); + } + default: + // FIXME: Handle the other argument kinds. + return X; + } +} + +static bool getCommonTemplateArguments(ASTContext &Ctx, + SmallVectorImpl<TemplateArgument> &R, + ArrayRef<TemplateArgument> Xs, + ArrayRef<TemplateArgument> Ys) { + if (Xs.size() != Ys.size()) + return true; + R.resize(Xs.size()); + for (size_t I = 0; I < R.size(); ++I) { + R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); + if (R[I].isNull()) + return true; + } + return false; +} + +static auto getCommonTemplateArguments(ASTContext &Ctx, + ArrayRef<TemplateArgument> Xs, + ArrayRef<TemplateArgument> Ys) { + SmallVector<TemplateArgument, 8> R; + bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); + assert(!Different); + (void)Different; + return R; +} + +template <class T> +static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { + return X->getKeyword() == Y->getKeyword() ? 
X->getKeyword() + : ElaboratedTypeKeyword::None; +} + +template <class T> +static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, + const T *Y) { + // FIXME: Try to keep the common NNS sugar. + return X->getQualifier() == Y->getQualifier() + ? X->getQualifier() + : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); +} + +template <class T> +static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { + return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); +} + +template <class T> +static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, + Qualifiers &QX, const T *Y, + Qualifiers &QY) { + QualType EX = X->getElementType(), EY = Y->getElementType(); + QualType R = Ctx.getCommonSugaredType(EX, EY, + /*Unqualified=*/true); + Qualifiers RQ = R.getQualifiers(); + QX += EX.getQualifiers() - RQ; + QY += EY.getQualifiers() - RQ; + return R; +} + +template <class T> +static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { + return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); +} + +template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { + assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); + return X->getSizeExpr(); +} + +static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { + assert(X->getSizeModifier() == Y->getSizeModifier()); + return X->getSizeModifier(); +} + +static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, + const ArrayType *Y) { + assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); + return X->getIndexTypeCVRQualifiers(); +} + +// Merges two type lists such that the resulting vector will contain +// each type (in a canonical sense) only once, in the order they appear +// from X to Y. If they occur in both X and Y, the result will contain +// the common sugared type between them. +static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, + ArrayRef<QualType> X, ArrayRef<QualType> Y) { + llvm::DenseMap<QualType, unsigned> Found; + for (auto Ts : {X, Y}) { + for (QualType T : Ts) { + auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); + if (!Res.second) { + QualType &U = Out[Res.first->second]; + U = Ctx.getCommonSugaredType(U, T); + } else { + Out.emplace_back(T); + } + } + } +} + +FunctionProtoType::ExceptionSpecInfo +ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, + FunctionProtoType::ExceptionSpecInfo ESI2, + SmallVectorImpl<QualType> &ExceptionTypeStorage, + bool AcceptDependent) { + ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; + + // If either of them can throw anything, that is the result. + for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { + if (EST1 == I) + return ESI1; + if (EST2 == I) + return ESI2; + } + + // If either of them is non-throwing, the result is the other. + for (auto I : + {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { + if (EST1 == I) + return ESI2; + if (EST2 == I) + return ESI1; + } + + // If we're left with value-dependent computed noexcept expressions, we're + // stuck. Before C++17, we can just drop the exception specification entirely, + // since it's not actually part of the canonical type. And this should never + // happen in C++17, because it would mean we were computing the composite + // pointer type of dependent types, which should never happen. 
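// An illustrative walk through the merging rules above, using hypothetical
// function exception specifications (none of these declarations are part of
// this patch):
//
//   void a() throw(int);          // EST_Dynamic, exceptions = {int}
//   void b() throw(float, int);   // EST_Dynamic, exceptions = {float, int}
//   void c() noexcept;            // EST_NoexceptTrue
//   void d();                     // EST_None, may throw anything
//
//   merge(a, b) -> throw(int, float)   // dynamic + dynamic: union of lists
//   merge(a, c) -> throw(int)          // non-throwing side yields the other
//   merge(a, d) -> (no specification)  // "can throw anything" wins outright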
+ if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { + assert(AcceptDependent && + "computing composite pointer type of dependent types"); + return FunctionProtoType::ExceptionSpecInfo(); + } + + // Switch over the possibilities so that people adding new values know to + // update this function. + switch (EST1) { + case EST_None: + case EST_DynamicNone: + case EST_MSAny: + case EST_BasicNoexcept: + case EST_DependentNoexcept: + case EST_NoexceptFalse: + case EST_NoexceptTrue: + case EST_NoThrow: + llvm_unreachable("These ESTs should be handled above"); + + case EST_Dynamic: { + // This is the fun case: both exception specifications are dynamic. Form + // the union of the two lists. + assert(EST2 == EST_Dynamic && "other cases should already be handled"); + mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, + ESI2.Exceptions); + FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); + Result.Exceptions = ExceptionTypeStorage; + return Result; + } + + case EST_Unevaluated: + case EST_Uninstantiated: + case EST_Unparsed: + llvm_unreachable("shouldn't see unresolved exception specifications here"); + } + + llvm_unreachable("invalid ExceptionSpecificationType"); +} + +static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, + Qualifiers &QX, const Type *Y, + Qualifiers &QY) { + Type::TypeClass TC = X->getTypeClass(); + assert(TC == Y->getTypeClass()); + switch (TC) { +#define UNEXPECTED_TYPE(Class, Kind) \ + case Type::Class: \ + llvm_unreachable("Unexpected " Kind ": " #Class); + +#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") +#define TYPE(Class, Base) +#include "clang/AST/TypeNodes.inc" + +#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") + SUGAR_FREE_TYPE(Builtin) + SUGAR_FREE_TYPE(DeducedTemplateSpecialization) + SUGAR_FREE_TYPE(DependentBitInt) + SUGAR_FREE_TYPE(Enum) + SUGAR_FREE_TYPE(BitInt) + SUGAR_FREE_TYPE(ObjCInterface) + SUGAR_FREE_TYPE(Record) + SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) + SUGAR_FREE_TYPE(UnresolvedUsing) +#undef SUGAR_FREE_TYPE +#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") + NON_UNIQUE_TYPE(TypeOfExpr) + NON_UNIQUE_TYPE(VariableArray) +#undef NON_UNIQUE_TYPE + + UNEXPECTED_TYPE(TypeOf, "sugar") + +#undef UNEXPECTED_TYPE + + case Type::Auto: { + const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); + assert(AX->getDeducedType().isNull()); + assert(AY->getDeducedType().isNull()); + assert(AX->getKeyword() == AY->getKeyword()); + assert(AX->isInstantiationDependentType() == + AY->isInstantiationDependentType()); + auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), + AY->getTypeConstraintArguments()); + return Ctx.getAutoType(QualType(), AX->getKeyword(), + AX->isInstantiationDependentType(), + AX->containsUnexpandedParameterPack(), + getCommonDeclChecked(AX->getTypeConstraintConcept(), + AY->getTypeConstraintConcept()), + As); + } + case Type::IncompleteArray: { + const auto *AX = cast<IncompleteArrayType>(X), + *AY = cast<IncompleteArrayType>(Y); + return Ctx.getIncompleteArrayType( + getCommonArrayElementType(Ctx, AX, QX, AY, QY), + getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); + } + case Type::DependentSizedArray: { + const auto *AX = cast<DependentSizedArrayType>(X), + *AY = cast<DependentSizedArrayType>(Y); + return Ctx.getDependentSizedArrayType( + getCommonArrayElementType(Ctx, AX, QX, AY, QY), + getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), + 
getCommonIndexTypeCVRQualifiers(AX, AY), + AX->getBracketsRange() == AY->getBracketsRange() + ? AX->getBracketsRange() + : SourceRange()); + } + case Type::ConstantArray: { + const auto *AX = cast<ConstantArrayType>(X), + *AY = cast<ConstantArrayType>(Y); + assert(AX->getSize() == AY->getSize()); + const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) + ? AX->getSizeExpr() + : nullptr; + return Ctx.getConstantArrayType( + getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, + getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); + } + case Type::ArrayParameter: { + const auto *AX = cast<ArrayParameterType>(X), + *AY = cast<ArrayParameterType>(Y); + assert(AX->getSize() == AY->getSize()); + const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) + ? AX->getSizeExpr() + : nullptr; + auto ArrayTy = Ctx.getConstantArrayType( + getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, + getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); + return Ctx.getArrayParameterType(ArrayTy); + } + case Type::Atomic: { + const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); + return Ctx.getAtomicType( + Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); + } + case Type::Complex: { + const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); + return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); + } + case Type::Pointer: { + const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); + return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); + } + case Type::BlockPointer: { + const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); + return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); + } + case Type::ObjCObjectPointer: { + const auto *PX = cast<ObjCObjectPointerType>(X), + *PY = cast<ObjCObjectPointerType>(Y); + return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); + } + case Type::MemberPointer: { + const auto *PX = cast<MemberPointerType>(X), + *PY = cast<MemberPointerType>(Y); + return Ctx.getMemberPointerType( + getCommonPointeeType(Ctx, PX, PY), + Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), + QualType(PY->getClass(), 0)) + .getTypePtr()); + } + case Type::LValueReference: { + const auto *PX = cast<LValueReferenceType>(X), + *PY = cast<LValueReferenceType>(Y); + // FIXME: Preserve PointeeTypeAsWritten. + return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), + PX->isSpelledAsLValue() || + PY->isSpelledAsLValue()); + } + case Type::RValueReference: { + const auto *PX = cast<RValueReferenceType>(X), + *PY = cast<RValueReferenceType>(Y); + // FIXME: Preserve PointeeTypeAsWritten. 
+ return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY)); + } + case Type::DependentAddressSpace: { + const auto *PX = cast<DependentAddressSpaceType>(X), + *PY = cast<DependentAddressSpaceType>(Y); + assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr())); + return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY), + PX->getAddrSpaceExpr(), + getCommonAttrLoc(PX, PY)); + } + case Type::FunctionNoProto: { + const auto *FX = cast<FunctionNoProtoType>(X), + *FY = cast<FunctionNoProtoType>(Y); + assert(FX->getExtInfo() == FY->getExtInfo()); + return Ctx.getFunctionNoProtoType( + Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()), + FX->getExtInfo()); + } + case Type::FunctionProto: { + const auto *FX = cast<FunctionProtoType>(X), + *FY = cast<FunctionProtoType>(Y); + FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(), + EPIY = FY->getExtProtoInfo(); + assert(EPIX.ExtInfo == EPIY.ExtInfo); + assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos); + assert(EPIX.RefQualifier == EPIY.RefQualifier); + assert(EPIX.TypeQuals == EPIY.TypeQuals); + assert(EPIX.Variadic == EPIY.Variadic); + + // FIXME: Can we handle an empty EllipsisLoc? + // Use emtpy EllipsisLoc if X and Y differ. + + EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn; + + QualType R = + Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()); + auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(), + /*Unqualified=*/true); + + SmallVector<QualType, 8> Exceptions; + EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs( + EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true); + return Ctx.getFunctionType(R, P, EPIX); + } + case Type::ObjCObject: { + const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y); + assert( + std::equal(OX->getProtocols().begin(), OX->getProtocols().end(), + OY->getProtocols().begin(), OY->getProtocols().end(), + [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) { + return P0->getCanonicalDecl() == P1->getCanonicalDecl(); + }) && + "protocol lists must be the same"); + auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(), + OY->getTypeArgsAsWritten()); + return Ctx.getObjCObjectType( + Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs, + OX->getProtocols(), + OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten()); + } + case Type::ConstantMatrix: { + const auto *MX = cast<ConstantMatrixType>(X), + *MY = cast<ConstantMatrixType>(Y); + assert(MX->getNumRows() == MY->getNumRows()); + assert(MX->getNumColumns() == MY->getNumColumns()); + return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY), + MX->getNumRows(), MX->getNumColumns()); + } + case Type::DependentSizedMatrix: { + const auto *MX = cast<DependentSizedMatrixType>(X), + *MY = cast<DependentSizedMatrixType>(Y); + assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr())); + assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr())); + return Ctx.getDependentSizedMatrixType( + getCommonElementType(Ctx, MX, MY), MX->getRowExpr(), + MX->getColumnExpr(), getCommonAttrLoc(MX, MY)); + } + case Type::Vector: { + const auto *VX = cast<VectorType>(X), *VY = cast<VectorType>(Y); + assert(VX->getNumElements() == VY->getNumElements()); + assert(VX->getVectorKind() == VY->getVectorKind()); + return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), + VX->getNumElements(), VX->getVectorKind()); + } + case Type::ExtVector: { + const auto *VX = cast<ExtVectorType>(X), *VY = 
cast<ExtVectorType>(Y); + assert(VX->getNumElements() == VY->getNumElements()); + return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), + VX->getNumElements()); + } + case Type::DependentSizedExtVector: { + const auto *VX = cast<DependentSizedExtVectorType>(X), + *VY = cast<DependentSizedExtVectorType>(Y); + return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), + getCommonSizeExpr(Ctx, VX, VY), + getCommonAttrLoc(VX, VY)); + } + case Type::DependentVector: { + const auto *VX = cast<DependentVectorType>(X), + *VY = cast<DependentVectorType>(Y); + assert(VX->getVectorKind() == VY->getVectorKind()); + return Ctx.getDependentVectorType( + getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), + getCommonAttrLoc(VX, VY), VX->getVectorKind()); + } + case Type::InjectedClassName: { + const auto *IX = cast<InjectedClassNameType>(X), + *IY = cast<InjectedClassNameType>(Y); + return Ctx.getInjectedClassNameType( + getCommonDeclChecked(IX->getDecl(), IY->getDecl()), + Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), + IY->getInjectedSpecializationType())); + } + case Type::TemplateSpecialization: { + const auto *TX = cast<TemplateSpecializationType>(X), + *TY = cast<TemplateSpecializationType>(Y); + auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), + TY->template_arguments()); + return Ctx.getTemplateSpecializationType( + ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), + TY->getTemplateName()), + As, X->getCanonicalTypeInternal()); + } + case Type::Decltype: { + const auto *DX = cast<DecltypeType>(X); + [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y); + assert(DX->isDependentType()); + assert(DY->isDependentType()); + assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr())); + // As Decltype is not uniqued, building a common type would be wasteful. 
+ return QualType(DX, 0); + } + case Type::PackIndexing: { + const auto *DX = cast<PackIndexingType>(X); + [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Y); + assert(DX->isDependentType()); + assert(DY->isDependentType()); + assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr())); + return QualType(DX, 0); + } + case Type::DependentName: { + const auto *NX = cast<DependentNameType>(X), + *NY = cast<DependentNameType>(Y); + assert(NX->getIdentifier() == NY->getIdentifier()); + return Ctx.getDependentNameType( + getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), + NX->getIdentifier(), NX->getCanonicalTypeInternal()); + } + case Type::DependentTemplateSpecialization: { + const auto *TX = cast<DependentTemplateSpecializationType>(X), + *TY = cast<DependentTemplateSpecializationType>(Y); + assert(TX->getIdentifier() == TY->getIdentifier()); + auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), + TY->template_arguments()); + return Ctx.getDependentTemplateSpecializationType( + getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), + TX->getIdentifier(), As); + } + case Type::UnaryTransform: { + const auto *TX = cast<UnaryTransformType>(X), + *TY = cast<UnaryTransformType>(Y); + assert(TX->getUTTKind() == TY->getUTTKind()); + return Ctx.getUnaryTransformType( + Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), + Ctx.getCommonSugaredType(TX->getUnderlyingType(), + TY->getUnderlyingType()), + TX->getUTTKind()); + } + case Type::PackExpansion: { + const auto *PX = cast<PackExpansionType>(X), + *PY = cast<PackExpansionType>(Y); + assert(PX->getNumExpansions() == PY->getNumExpansions()); + return Ctx.getPackExpansionType( + Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), + PX->getNumExpansions(), false); + } + case Type::Pipe: { + const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); + assert(PX->isReadOnly() == PY->isReadOnly()); + auto MP = PX->isReadOnly() ? 
&ASTContext::getReadPipeType + : &ASTContext::getWritePipeType; + return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); + } + case Type::TemplateTypeParm: { + const auto *TX = cast<TemplateTypeParmType>(X), + *TY = cast<TemplateTypeParmType>(Y); + assert(TX->getDepth() == TY->getDepth()); + assert(TX->getIndex() == TY->getIndex()); + assert(TX->isParameterPack() == TY->isParameterPack()); + return Ctx.getTemplateTypeParmType( + TX->getDepth(), TX->getIndex(), TX->isParameterPack(), + getCommonDecl(TX->getDecl(), TY->getDecl())); + } + } + llvm_unreachable("Unknown Type Class"); +} + +static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, + const Type *Y, + SplitQualType Underlying) { + Type::TypeClass TC = X->getTypeClass(); + if (TC != Y->getTypeClass()) + return QualType(); + switch (TC) { +#define UNEXPECTED_TYPE(Class, Kind) \ + case Type::Class: \ + llvm_unreachable("Unexpected " Kind ": " #Class); +#define TYPE(Class, Base) +#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") +#include "clang/AST/TypeNodes.inc" + +#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") + CANONICAL_TYPE(Atomic) + CANONICAL_TYPE(BitInt) + CANONICAL_TYPE(BlockPointer) + CANONICAL_TYPE(Builtin) + CANONICAL_TYPE(Complex) + CANONICAL_TYPE(ConstantArray) + CANONICAL_TYPE(ArrayParameter) + CANONICAL_TYPE(ConstantMatrix) + CANONICAL_TYPE(Enum) + CANONICAL_TYPE(ExtVector) + CANONICAL_TYPE(FunctionNoProto) + CANONICAL_TYPE(FunctionProto) + CANONICAL_TYPE(IncompleteArray) + CANONICAL_TYPE(LValueReference) + CANONICAL_TYPE(MemberPointer) + CANONICAL_TYPE(ObjCInterface) + CANONICAL_TYPE(ObjCObject) + CANONICAL_TYPE(ObjCObjectPointer) + CANONICAL_TYPE(Pipe) + CANONICAL_TYPE(Pointer) + CANONICAL_TYPE(Record) + CANONICAL_TYPE(RValueReference) + CANONICAL_TYPE(VariableArray) + CANONICAL_TYPE(Vector) +#undef CANONICAL_TYPE + +#undef UNEXPECTED_TYPE + + case Type::Adjusted: { + const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); + QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); + if (!Ctx.hasSameType(OX, OY)) + return QualType(); + // FIXME: It's inefficient to have to unify the original types. + return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), + Ctx.getQualifiedType(Underlying)); + } + case Type::Decayed: { + const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); + QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); + if (!Ctx.hasSameType(OX, OY)) + return QualType(); + // FIXME: It's inefficient to have to unify the original types. + return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), + Ctx.getQualifiedType(Underlying)); + } + case Type::Attributed: { + const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); + AttributedType::Kind Kind = AX->getAttrKind(); + if (Kind != AY->getAttrKind()) + return QualType(); + QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); + if (!Ctx.hasSameType(MX, MY)) + return QualType(); + // FIXME: It's inefficient to have to unify the modified types. + return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), + Ctx.getQualifiedType(Underlying)); + } + case Type::BTFTagAttributed: { + const auto *BX = cast<BTFTagAttributedType>(X); + const BTFTypeTagAttr *AX = BX->getAttr(); + // The attribute is not uniqued, so just compare the tag. 
+ if (AX->getBTFTypeTag() != + cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) + return QualType(); + return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); + } + case Type::Auto: { + const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); + + AutoTypeKeyword KW = AX->getKeyword(); + if (KW != AY->getKeyword()) + return QualType(); + + ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), + AY->getTypeConstraintConcept()); + SmallVector<TemplateArgument, 8> As; + if (CD && + getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), + AY->getTypeConstraintArguments())) { + CD = nullptr; // The arguments differ, so make it unconstrained. + As.clear(); + } + + // Both auto types can't be dependent, otherwise they wouldn't have been + // sugar. This implies they can't contain unexpanded packs either. + return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), + /*IsDependent=*/false, /*IsPack=*/false, CD, As); + } + case Type::PackIndexing: + case Type::Decltype: + return QualType(); + case Type::DeducedTemplateSpecialization: + // FIXME: Try to merge these. + return QualType(); + + case Type::Elaborated: { + const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); + return Ctx.getElaboratedType( + ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), + Ctx.getQualifiedType(Underlying), + ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); + } + case Type::MacroQualified: { + const auto *MX = cast<MacroQualifiedType>(X), + *MY = cast<MacroQualifiedType>(Y); + const IdentifierInfo *IX = MX->getMacroIdentifier(); + if (IX != MY->getMacroIdentifier()) + return QualType(); + return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); + } + case Type::SubstTemplateTypeParm: { + const auto *SX = cast<SubstTemplateTypeParmType>(X), + *SY = cast<SubstTemplateTypeParmType>(Y); + Decl *CD = + ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); + if (!CD) + return QualType(); + unsigned Index = SX->getIndex(); + if (Index != SY->getIndex()) + return QualType(); + auto PackIndex = SX->getPackIndex(); + if (PackIndex != SY->getPackIndex()) + return QualType(); + return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), + CD, Index, PackIndex); + } + case Type::ObjCTypeParam: + // FIXME: Try to merge these. + return QualType(); + case Type::Paren: + return Ctx.getParenType(Ctx.getQualifiedType(Underlying)); + + case Type::TemplateSpecialization: { + const auto *TX = cast<TemplateSpecializationType>(X), + *TY = cast<TemplateSpecializationType>(Y); + TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(), + TY->getTemplateName()); + if (!CTN.getAsVoidPointer()) + return QualType(); + SmallVector<TemplateArgument, 8> Args; + if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(), + TY->template_arguments())) + return QualType(); + return Ctx.getTemplateSpecializationType(CTN, Args, + Ctx.getQualifiedType(Underlying)); + } + case Type::Typedef: { + const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y); + const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl()); + if (!CD) + return QualType(); + return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying)); + } + case Type::TypeOf: { + // The common sugar between two typeof expressions, where one is + // potentially a typeof_unqual and the other is not, we unify to the + // qualified type as that retains the most information along with the type. 
+ // We only return a typeof_unqual type when both types are unqual types. + TypeOfKind Kind = TypeOfKind::Qualified; + if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() && + cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified) + Kind = TypeOfKind::Unqualified; + return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind); + } + case Type::TypeOfExpr: + return QualType(); + + case Type::UnaryTransform: { + const auto *UX = cast<UnaryTransformType>(X), + *UY = cast<UnaryTransformType>(Y); + UnaryTransformType::UTTKind KX = UX->getUTTKind(); + if (KX != UY->getUTTKind()) + return QualType(); + QualType BX = UX->getBaseType(), BY = UY->getBaseType(); + if (!Ctx.hasSameType(BX, BY)) + return QualType(); + // FIXME: It's inefficient to have to unify the base types. + return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY), + Ctx.getQualifiedType(Underlying), KX); + } + case Type::Using: { + const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y); + const UsingShadowDecl *CD = + ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl()); + if (!CD) + return QualType(); + return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); + } + case Type::CountAttributed: { + const auto *DX = cast<CountAttributedType>(X), + *DY = cast<CountAttributedType>(Y); + if (DX->isCountInBytes() != DY->isCountInBytes()) + return QualType(); + if (DX->isOrNull() != DY->isOrNull()) + return QualType(); + Expr *CEX = DX->getCountExpr(); + Expr *CEY = DY->getCountExpr(); + llvm::ArrayRef<clang::TypeCoupledDeclRefInfo> CDX = DX->getCoupledDecls(); + if (Ctx.hasSameExpr(CEX, CEY)) + return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX, + DX->isCountInBytes(), DX->isOrNull(), + CDX); + if (!CEX->isIntegerConstantExpr(Ctx) || !CEY->isIntegerConstantExpr(Ctx)) + return QualType(); + // Two declarations with the same integer constant may still differ in their + // expression pointers, so we need to evaluate them. + llvm::APSInt VX = *CEX->getIntegerConstantExpr(Ctx); + llvm::APSInt VY = *CEY->getIntegerConstantExpr(Ctx); + if (VX != VY) + return QualType(); + return Ctx.getCountAttributedType(Ctx.getQualifiedType(Underlying), CEX, + DX->isCountInBytes(), DX->isOrNull(), + CDX); + } + } + llvm_unreachable("Unhandled Type Class"); +} + +static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) { + SmallVector<SplitQualType, 8> R; + while (true) { + QTotal.addConsistentQualifiers(T.Quals); + QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); + if (NT == QualType(T.Ty, 0)) + break; + R.push_back(T); + T = NT.split(); + } + return R; +} + +QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, + bool Unqualified) { + assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); + if (X == Y) + return X; + if (!Unqualified) { + if (X.isCanonical()) + return X; + if (Y.isCanonical()) + return Y; + } + + SplitQualType SX = X.split(), SY = Y.split(); + Qualifiers QX, QY; + // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, + // until we reach their underlying "canonical nodes". Note these are not + // necessarily canonical types, as they may still have sugared properties. + // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. + auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); + if (SX.Ty != SY.Ty) { + // The canonical nodes differ. Build a common canonical node out of the two, + // unifying their sugar. This may recurse back here. 
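// A small worked example of the unification performed here (hypothetical
// typedefs, not part of this patch): given two spellings of the same
// canonical type, only the sugar they share survives.
//
//   typedef int Length;
//   typedef int Size;
//
//   getCommonSugaredType(Length,   Length)  yields Length  // same sugar kept
//   getCommonSugaredType(Length,   Size)    yields int     // typedefs differ
//   getCommonSugaredType(Length *, Size *)  yields int *   // recurses through
//                                                          // the pointee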
+ SX.Ty = + ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); + } else { + // The canonical nodes were identical: We may have desugared too much. + // Add any common sugar back in. + while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { + QX -= SX.Quals; + QY -= SY.Quals; + SX = Xs.pop_back_val(); + SY = Ys.pop_back_val(); + } + } + if (Unqualified) + QX = Qualifiers::removeCommonQualifiers(QX, QY); else - return (*AddrSpaceMap)[(unsigned)AS]; + assert(QX == QY); + + // Even though the remaining sugar nodes in Xs and Ys differ, some may be + // related. Walk up these nodes, unifying them and adding the result. + while (!Xs.empty() && !Ys.empty()) { + auto Underlying = SplitQualType( + SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); + SX = Xs.pop_back_val(); + SY = Ys.pop_back_val(); + SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) + .getTypePtrOrNull(); + // Stop at the first pair which is unrelated. + if (!SX.Ty) { + SX.Ty = Underlying.Ty; + break; + } + QX -= Underlying.Quals; + }; + + // Add back the missing accumulated qualifiers, which were stripped off + // with the sugar nodes we could not unify. + QualType R = getQualifiedType(SX.Ty, QX); + assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X)); + return R; +} + +QualType ASTContext::getCorrespondingUnsaturatedType(QualType Ty) const { + assert(Ty->isFixedPointType()); + + if (Ty->isUnsaturatedFixedPointType()) + return Ty; + + switch (Ty->castAs<BuiltinType>()->getKind()) { + default: + llvm_unreachable("Not a saturated fixed point type!"); + case BuiltinType::SatShortAccum: + return ShortAccumTy; + case BuiltinType::SatAccum: + return AccumTy; + case BuiltinType::SatLongAccum: + return LongAccumTy; + case BuiltinType::SatUShortAccum: + return UnsignedShortAccumTy; + case BuiltinType::SatUAccum: + return UnsignedAccumTy; + case BuiltinType::SatULongAccum: + return UnsignedLongAccumTy; + case BuiltinType::SatShortFract: + return ShortFractTy; + case BuiltinType::SatFract: + return FractTy; + case BuiltinType::SatLongFract: + return LongFractTy; + case BuiltinType::SatUShortFract: + return UnsignedShortFractTy; + case BuiltinType::SatUFract: + return UnsignedFractTy; + case BuiltinType::SatULongFract: + return UnsignedLongFractTy; + } } QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { @@ -11622,18 +14037,26 @@ QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { } } +// Given a list of FMV features, return a concatenated list of the +// corresponding backend features (which may contain duplicates). 
+static std::vector<std::string> getFMVBackendFeaturesFor( + const llvm::SmallVectorImpl<StringRef> &FMVFeatStrings) { + std::vector<std::string> BackendFeats; + for (StringRef F : FMVFeatStrings) + if (auto FMVExt = llvm::AArch64::parseFMVExtension(F)) + for (StringRef F : FMVExt->getImpliedFeatures()) + BackendFeats.push_back(F.str()); + return BackendFeats; +} + ParsedTargetAttr ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { assert(TD != nullptr); - ParsedTargetAttr ParsedAttr = TD->parse(); - - ParsedAttr.Features.erase( - llvm::remove_if(ParsedAttr.Features, - [&](const std::string &Feat) { - return !Target->isValidFeatureName( - StringRef{Feat}.substr(1)); - }), - ParsedAttr.Features.end()); + ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); + + llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { + return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); + }); return ParsedAttr; } @@ -11658,14 +14081,15 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, // Make a copy of the features as passed on the command line into the // beginning of the additional features from the function to override. - ParsedAttr.Features.insert( - ParsedAttr.Features.begin(), - Target->getTargetOpts().FeaturesAsWritten.begin(), - Target->getTargetOpts().FeaturesAsWritten.end()); + // AArch64 handles command line option features in parseTargetAttr(). + if (!Target->getTriple().isAArch64()) + ParsedAttr.Features.insert( + ParsedAttr.Features.begin(), + Target->getTargetOpts().FeaturesAsWritten.begin(), + Target->getTargetOpts().FeaturesAsWritten.end()); - if (ParsedAttr.Architecture != "" && - Target->isValidCPUName(ParsedAttr.Architecture)) - TargetCPU = ParsedAttr.Architecture; + if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) + TargetCPU = ParsedAttr.CPU; // Now populate the feature map, first with the TargetCPU which is either // the default or a new one from the target attribute string. 
Then we'll use @@ -11678,6 +14102,35 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, Target->getCPUSpecificCPUDispatchFeatures( SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp); std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end()); + Features.insert(Features.begin(), + Target->getTargetOpts().FeaturesAsWritten.begin(), + Target->getTargetOpts().FeaturesAsWritten.end()); + Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); + } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) { + if (Target->getTriple().isAArch64()) { + llvm::SmallVector<StringRef, 8> Feats; + TC->getFeatures(Feats, GD.getMultiVersionIndex()); + std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats); + Features.insert(Features.begin(), + Target->getTargetOpts().FeaturesAsWritten.begin(), + Target->getTargetOpts().FeaturesAsWritten.end()); + Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); + } else { + std::vector<std::string> Features; + StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); + if (VersionStr.starts_with("arch=")) + TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1); + else if (VersionStr != "default") + Features.push_back((StringRef{"+"} + VersionStr).str()); + Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); + } + } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) { + llvm::SmallVector<StringRef, 8> Feats; + TV->getFeatures(Feats); + std::vector<std::string> Features = getFMVBackendFeaturesFor(Feats); + Features.insert(Features.begin(), + Target->getTargetOpts().FeaturesAsWritten.begin(), + Target->getTargetOpts().FeaturesAsWritten.end()); Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); } else { FeatureMap = Target->getTargetOpts().FeatureMap; @@ -11697,22 +14150,27 @@ operator<<(const StreamingDiagnostic &DB, return DB << "a prior #pragma section"; } -bool ASTContext::mayExternalizeStaticVar(const Decl *D) const { - bool IsStaticVar = - isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static; +bool ASTContext::mayExternalize(const Decl *D) const { + bool IsInternalVar = + isa<VarDecl>(D) && + basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal; bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() && !D->getAttr<CUDADeviceAttr>()->isImplicit()) || (D->hasAttr<CUDAConstantAttr>() && !D->getAttr<CUDAConstantAttr>()->isImplicit()); - // CUDA/HIP: static managed variables need to be externalized since it is - // a declaration in IR, therefore cannot have internal linkage. - return IsStaticVar && - (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar); -} - -bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const { - return mayExternalizeStaticVar(D) && - (D->hasAttr<HIPManagedAttr>() || + // CUDA/HIP: managed variables need to be externalized since it is + // a declaration in IR, therefore cannot have internal linkage. Kernels in + // anonymous name space needs to be externalized to avoid duplicate symbols. 
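// Hypothetical CUDA/HIP inputs illustrating the rule below (editorial
// examples, not part of this patch):
//
//   static __device__ __managed__ int Counter;  // internal managed variable:
//                                                // externalized
//   namespace { __global__ void Kern() {} }      // kernel with internal
//                                                // linkage: externalized to
//                                                // avoid duplicate symbols
//   static __device__ int Local;                 // explicit device variable:
//                                                // externalized only if ODR-
//                                                // used from the host (see
//                                                // shouldExternalize below)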
+ return (IsInternalVar && + (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || + (D->hasAttr<CUDAGlobalAttr>() && + basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == + GVA_Internal); +} + +bool ASTContext::shouldExternalize(const Decl *D) const { + return mayExternalize(D) && + (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); } @@ -11725,85 +14183,73 @@ StringRef ASTContext::getCUIDHash() const { return CUIDHash; } -// Get the closest named parent, so we can order the sycl naming decls somewhere -// that mangling is meaningful. -static const DeclContext *GetNamedParent(const CXXRecordDecl *RD) { - const DeclContext *DC = RD->getDeclContext(); - - while (!isa<NamedDecl, TranslationUnitDecl>(DC)) - DC = DC->getParent(); - return DC; -} - -void ASTContext::AddSYCLKernelNamingDecl(const CXXRecordDecl *RD) { - assert(getLangOpts().isSYCL() && "Only valid for SYCL programs"); - RD = RD->getCanonicalDecl(); - const DeclContext *DC = GetNamedParent(RD); - - assert(RD->getLocation().isValid() && - "Invalid location on kernel naming decl"); - - (void)SYCLKernelNamingTypes[DC].insert(RD); -} - -bool ASTContext::IsSYCLKernelNamingDecl(const NamedDecl *ND) const { - assert(getLangOpts().isSYCL() && "Only valid for SYCL programs"); - const auto *RD = dyn_cast<CXXRecordDecl>(ND); - if (!RD) - return false; - RD = RD->getCanonicalDecl(); - const DeclContext *DC = GetNamedParent(RD); - - auto Itr = SYCLKernelNamingTypes.find(DC); - - if (Itr == SYCLKernelNamingTypes.end()) - return false; - - return Itr->getSecond().count(RD); -} - -// Filters the Decls list to those that share the lambda mangling with the -// passed RD. -void ASTContext::FilterSYCLKernelNamingDecls( - const CXXRecordDecl *RD, - llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls) { - - if (!SYCLKernelFilterContext) - SYCLKernelFilterContext.reset( - ItaniumMangleContext::create(*this, getDiagnostics())); - - llvm::SmallString<128> LambdaSig; - llvm::raw_svector_ostream Out(LambdaSig); - SYCLKernelFilterContext->mangleLambdaSig(RD, Out); - - llvm::erase_if(Decls, [this, &LambdaSig](const CXXRecordDecl *LocalRD) { - llvm::SmallString<128> LocalLambdaSig; - llvm::raw_svector_ostream LocalOut(LocalLambdaSig); - SYCLKernelFilterContext->mangleLambdaSig(LocalRD, LocalOut); - return LambdaSig != LocalLambdaSig; - }); +const CXXRecordDecl * +ASTContext::baseForVTableAuthentication(const CXXRecordDecl *ThisClass) { + assert(ThisClass); + assert(ThisClass->isPolymorphic()); + const CXXRecordDecl *PrimaryBase = ThisClass; + while (1) { + assert(PrimaryBase); + assert(PrimaryBase->isPolymorphic()); + auto &Layout = getASTRecordLayout(PrimaryBase); + auto Base = Layout.getPrimaryBase(); + if (!Base || Base == PrimaryBase || !Base->isPolymorphic()) + break; + PrimaryBase = Base; + } + return PrimaryBase; } -unsigned ASTContext::GetSYCLKernelNamingIndex(const NamedDecl *ND) { - assert(getLangOpts().isSYCL() && "Only valid for SYCL programs"); - assert(IsSYCLKernelNamingDecl(ND) && - "Lambda not involved in mangling asked for a naming index?"); - - const CXXRecordDecl *RD = cast<CXXRecordDecl>(ND)->getCanonicalDecl(); - const DeclContext *DC = GetNamedParent(RD); +bool ASTContext::useAbbreviatedThunkName(GlobalDecl VirtualMethodDecl, + StringRef MangledName) { + auto *Method = cast<CXXMethodDecl>(VirtualMethodDecl.getDecl()); + assert(Method->isVirtual()); + bool DefaultIncludesPointerAuth = + LangOpts.PointerAuthCalls || LangOpts.PointerAuthIntrinsics; - auto Itr = 
SYCLKernelNamingTypes.find(DC); - assert(Itr != SYCLKernelNamingTypes.end() && "Not a valid DeclContext?"); - - const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &Set = Itr->getSecond(); - - llvm::SmallVector<const CXXRecordDecl *> Decls{Set.begin(), Set.end()}; - - FilterSYCLKernelNamingDecls(RD, Decls); + if (!DefaultIncludesPointerAuth) + return true; - llvm::sort(Decls, [](const CXXRecordDecl *LHS, const CXXRecordDecl *RHS) { - return LHS->getLambdaManglingNumber() < RHS->getLambdaManglingNumber(); - }); + auto Existing = ThunksToBeAbbreviated.find(VirtualMethodDecl); + if (Existing != ThunksToBeAbbreviated.end()) + return Existing->second.contains(MangledName.str()); + + std::unique_ptr<MangleContext> Mangler(createMangleContext()); + llvm::StringMap<llvm::SmallVector<std::string, 2>> Thunks; + auto VtableContext = getVTableContext(); + if (const auto *ThunkInfos = VtableContext->getThunkInfo(VirtualMethodDecl)) { + auto *Destructor = dyn_cast<CXXDestructorDecl>(Method); + for (const auto &Thunk : *ThunkInfos) { + SmallString<256> ElidedName; + llvm::raw_svector_ostream ElidedNameStream(ElidedName); + if (Destructor) + Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(), + Thunk, /* elideOverrideInfo */ true, + ElidedNameStream); + else + Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ true, + ElidedNameStream); + SmallString<256> MangledName; + llvm::raw_svector_ostream mangledNameStream(MangledName); + if (Destructor) + Mangler->mangleCXXDtorThunk(Destructor, VirtualMethodDecl.getDtorType(), + Thunk, /* elideOverrideInfo */ false, + mangledNameStream); + else + Mangler->mangleThunk(Method, Thunk, /* elideOverrideInfo */ false, + mangledNameStream); - return llvm::find(Decls, RD) - Decls.begin(); + if (Thunks.find(ElidedName) == Thunks.end()) + Thunks[ElidedName] = {}; + Thunks[ElidedName].push_back(std::string(MangledName)); + } + } + llvm::StringSet<> SimplifiedThunkNames; + for (auto &ThunkList : Thunks) { + llvm::sort(ThunkList.second); + SimplifiedThunkNames.insert(ThunkList.second[0]); + } + bool Result = SimplifiedThunkNames.contains(MangledName); + ThunksToBeAbbreviated[VirtualMethodDecl] = std::move(SimplifiedThunkNames); + return Result; } diff --git a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp index dc22481d0a84..0680ff5e3a38 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp @@ -25,8 +25,10 @@ using namespace clang; // Returns a desugared version of the QualType, and marks ShouldAKA as true -// whenever we remove significant sugar from the type. -static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) { +// whenever we remove significant sugar from the type. Make sure ShouldAKA +// is initialized before passing it in. +QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT, + bool &ShouldAKA) { QualifierCollector QC; while (true) { @@ -37,6 +39,11 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) { QT = ET->desugar(); continue; } + // ... or a using type ... + if (const UsingType *UT = dyn_cast<UsingType>(Ty)) { + QT = UT->desugar(); + continue; + } // ... or a paren type ... 
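A rough model of the useAbbreviatedThunkName bookkeeping above (standalone C++; the strings are placeholders, not real manglings): every thunk gets both a full mangled name and an "elided" name without override info, the full names are grouped by elided spelling, each group is sorted, and only the first entry of a group is allowed to keep the abbreviated spelling so names remain unique.

    #include <algorithm>
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      // {elided name, full mangled name} pairs; strings are placeholders.
      std::vector<std::pair<std::string, std::string>> Thunks = {
          {"thunk_A", "thunk_A_variant2"},
          {"thunk_A", "thunk_A_variant1"},
          {"thunk_B", "thunk_B_variant1"},
      };

      // Group the full names by their elided spelling.
      std::map<std::string, std::vector<std::string>> Groups;
      for (const auto &T : Thunks)
        Groups[T.first].push_back(T.second);

      // Within each group, only the smallest full name keeps the short form.
      std::set<std::string> Abbreviated;
      for (auto &G : Groups) {
        std::sort(G.second.begin(), G.second.end());
        Abbreviated.insert(G.second.front());
      }

      for (const auto &Name : Abbreviated)
        std::cout << Name << " may use its abbreviated thunk name\n";
    }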
if (const ParenType *PT = dyn_cast<ParenType>(Ty)) { QT = PT->desugar(); @@ -76,7 +83,7 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) { if (const FunctionType *FT = dyn_cast<FunctionType>(Ty)) { bool DesugarReturn = false; QualType SugarRT = FT->getReturnType(); - QualType RT = Desugar(Context, SugarRT, DesugarReturn); + QualType RT = desugarForDiagnostic(Context, SugarRT, DesugarReturn); if (auto nullability = AttributedType::stripOuterNullability(SugarRT)) { RT = Context.getAttributedType( AttributedType::getNullabilityAttrKind(*nullability), RT, RT); @@ -87,7 +94,7 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) { const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT); if (FPT) { for (QualType SugarPT : FPT->param_types()) { - QualType PT = Desugar(Context, SugarPT, DesugarArgument); + QualType PT = desugarForDiagnostic(Context, SugarPT, DesugarArgument); if (auto nullability = AttributedType::stripOuterNullability(SugarPT)) { PT = Context.getAttributedType( @@ -112,10 +119,10 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) { if (!TST->isTypeAlias()) { bool DesugarArgument = false; SmallVector<TemplateArgument, 4> Args; - for (unsigned I = 0, N = TST->getNumArgs(); I != N; ++I) { - const TemplateArgument &Arg = TST->getArg(I); + for (const TemplateArgument &Arg : TST->template_arguments()) { if (Arg.getKind() == TemplateArgument::Type) - Args.push_back(Desugar(Context, Arg.getAsType(), DesugarArgument)); + Args.push_back(desugarForDiagnostic(Context, Arg.getAsType(), + DesugarArgument)); else Args.push_back(Arg); } @@ -129,6 +136,29 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) { } } + if (const auto *AT = dyn_cast<ArrayType>(Ty)) { + QualType ElementTy = + desugarForDiagnostic(Context, AT->getElementType(), ShouldAKA); + if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) + QT = Context.getConstantArrayType( + ElementTy, CAT->getSize(), CAT->getSizeExpr(), + CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers()); + else if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) + QT = Context.getVariableArrayType( + ElementTy, VAT->getSizeExpr(), VAT->getSizeModifier(), + VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange()); + else if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(AT)) + QT = Context.getDependentSizedArrayType( + ElementTy, DSAT->getSizeExpr(), DSAT->getSizeModifier(), + DSAT->getIndexTypeCVRQualifiers(), DSAT->getBracketsRange()); + else if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) + QT = Context.getIncompleteArrayType(ElementTy, IAT->getSizeModifier(), + IAT->getIndexTypeCVRQualifiers()); + else + llvm_unreachable("Unhandled array type"); + break; + } + // Don't desugar magic Objective-C types. if (QualType(Ty,0) == Context.getObjCIdType() || QualType(Ty,0) == Context.getObjCClassType() || @@ -181,24 +211,25 @@ break; \ // If we have a pointer-like type, desugar the pointee as well. // FIXME: Handle other pointer-like types. 
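The desugaring loop above can be pictured with a toy model (made-up types, not the clang API): sugar layers are peeled off one at a time, and ShouldAKA is set as soon as a layer whose spelling matters is removed, which is what later drives the "(aka '...')" note in diagnostics.

    #include <iostream>
    #include <memory>
    #include <string>

    // Toy model: a type is a chain of sugar layers over a canonical type.
    struct Ty {
      std::string Spelling;
      bool SignificantSugar;         // e.g. a typedef or using-type, not parens
      std::shared_ptr<Ty> Desugared; // null once fully desugared
    };

    // Peel sugar, recording whether anything worth an "aka" was removed.
    static std::shared_ptr<Ty> desugarForDiag(std::shared_ptr<Ty> T,
                                              bool &ShouldAKA) {
      while (T->Desugared) {
        if (T->SignificantSugar)
          ShouldAKA = true;
        T = T->Desugared;
      }
      return T;
    }

    int main() {
      auto IntTy = std::make_shared<Ty>(Ty{"int", false, nullptr});
      auto Alias = std::make_shared<Ty>(Ty{"MyInt", true, IntTy});
      bool ShouldAKA = false;
      auto Canon = desugarForDiag(Alias, ShouldAKA);
      std::cout << "'" << Alias->Spelling << "'";
      if (ShouldAKA)
        std::cout << " (aka '" << Canon->Spelling << "')";
      std::cout << '\n'; // prints: 'MyInt' (aka 'int')
    }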
if (const PointerType *Ty = QT->getAs<PointerType>()) { - QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(), - ShouldAKA)); + QT = Context.getPointerType( + desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA)); } else if (const auto *Ty = QT->getAs<ObjCObjectPointerType>()) { - QT = Context.getObjCObjectPointerType(Desugar(Context, Ty->getPointeeType(), - ShouldAKA)); + QT = Context.getObjCObjectPointerType( + desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA)); } else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) { - QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(), - ShouldAKA)); + QT = Context.getLValueReferenceType( + desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA)); } else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) { - QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(), - ShouldAKA)); + QT = Context.getRValueReferenceType( + desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA)); } else if (const auto *Ty = QT->getAs<ObjCObjectType>()) { if (Ty->getBaseType().getTypePtr() != Ty && !ShouldAKA) { - QualType BaseType = Desugar(Context, Ty->getBaseType(), ShouldAKA); - QT = Context.getObjCObjectType(BaseType, Ty->getTypeArgsAsWritten(), - llvm::makeArrayRef(Ty->qual_begin(), - Ty->getNumProtocols()), - Ty->isKindOfTypeAsWritten()); + QualType BaseType = + desugarForDiagnostic(Context, Ty->getBaseType(), ShouldAKA); + QT = Context.getObjCObjectType( + BaseType, Ty->getTypeArgsAsWritten(), + llvm::ArrayRef(Ty->qual_begin(), Ty->getNumProtocols()), + Ty->isKindOfTypeAsWritten()); } } @@ -239,9 +270,9 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty, std::string S = Ty.getAsString(Context.getPrintingPolicy()); std::string CanS = CanTy.getAsString(Context.getPrintingPolicy()); - for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) { + for (const intptr_t &QualTypeVal : QualTypeVals) { QualType CompareTy = - QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I])); + QualType::getFromOpaquePtr(reinterpret_cast<void *>(QualTypeVal)); if (CompareTy.isNull()) continue; if (CompareTy == Ty) @@ -251,7 +282,8 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty, continue; // Same canonical types std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy()); bool ShouldAKA = false; - QualType CompareDesugar = Desugar(Context, CompareTy, ShouldAKA); + QualType CompareDesugar = + desugarForDiagnostic(Context, CompareTy, ShouldAKA); std::string CompareDesugarStr = CompareDesugar.getAsString(Context.getPrintingPolicy()); if (CompareS != S && CompareDesugarStr != S) @@ -270,11 +302,11 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty, // Check to see if we already desugared this type in this // diagnostic. If so, don't do it again. bool Repeated = false; - for (unsigned i = 0, e = PrevArgs.size(); i != e; ++i) { + for (const auto &PrevArg : PrevArgs) { // TODO: Handle ak_declcontext case. - if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) { - void *Ptr = (void*)PrevArgs[i].second; - QualType PrevTy(QualType::getFromOpaquePtr(Ptr)); + if (PrevArg.first == DiagnosticsEngine::ak_qualtype) { + QualType PrevTy( + QualType::getFromOpaquePtr(reinterpret_cast<void *>(PrevArg.second))); if (PrevTy == Ty) { Repeated = true; break; @@ -286,7 +318,7 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty, // sugar gives us something "significantly different". 
if (!Repeated) { bool ShouldAKA = false; - QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA); + QualType DesugaredTy = desugarForDiagnostic(Context, Ty, ShouldAKA); if (ShouldAKA || ForceAKA) { if (DesugaredTy == Ty) { DesugaredTy = Ty.getCanonicalType(); @@ -308,7 +340,7 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty, OS << "'" << S << "' (vector of " << VTy->getNumElements() << " '" << VTy->getElementType().getAsString(Context.getPrintingPolicy()) << "' " << Values << ")"; - return OS.str(); + return DecoratedString; } } @@ -340,7 +372,7 @@ void clang::FormatASTNodeDiagnosticArgument( default: llvm_unreachable("unknown ArgumentKind"); case DiagnosticsEngine::ak_addrspace: { assert(Modifier.empty() && Argument.empty() && - "Invalid modifier for Qualfiers argument"); + "Invalid modifier for Qualifiers argument"); auto S = Qualifiers::getAddrSpaceAsString(static_cast<LangAS>(Val)); if (S.empty()) { @@ -355,7 +387,7 @@ void clang::FormatASTNodeDiagnosticArgument( } case DiagnosticsEngine::ak_qual: { assert(Modifier.empty() && Argument.empty() && - "Invalid modifier for Qualfiers argument"); + "Invalid modifier for Qualifiers argument"); Qualifiers Q(Qualifiers::fromOpaqueValue(Val)); auto S = Q.getAsString(); @@ -393,7 +425,7 @@ void clang::FormatASTNodeDiagnosticArgument( Modifier = StringRef(); Argument = StringRef(); // Fall through - LLVM_FALLTHROUGH; + [[fallthrough]]; } case DiagnosticsEngine::ak_qualtype: { assert(Modifier.empty() && Argument.empty() && @@ -507,7 +539,7 @@ class TemplateDiff { bool ShowColor; /// FromTemplateType - When single type printing is selected, this is the - /// type to be be printed. When tree printing is selected, this type will + /// type to be printed. When tree printing is selected, this type will /// show up first in the tree. QualType FromTemplateType; @@ -954,7 +986,7 @@ class TemplateDiff { if (isEnd()) return; // Set to first template argument. If not a parameter pack, done. - TemplateArgument TA = TST->getArg(0); + TemplateArgument TA = TST->template_arguments()[0]; if (TA.getKind() != TemplateArgument::Pack) return; // Start looking into the parameter pack. @@ -975,7 +1007,7 @@ class TemplateDiff { /// isEnd - Returns true if the iterator is one past the end. bool isEnd() const { assert(TST && "InternalIterator is invalid with a null TST."); - return Index >= TST->getNumArgs(); + return Index >= TST->template_arguments().size(); } /// &operator++ - Increment the iterator to the next template argument. @@ -995,11 +1027,11 @@ class TemplateDiff { // Loop until a template argument is found, or the end is reached. while (true) { // Advance to the next template argument. Break if reached the end. - if (++Index == TST->getNumArgs()) + if (++Index == TST->template_arguments().size()) break; // If the TemplateArgument is not a parameter pack, done. 
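The InternalIterator above amounts to flattening: a template argument list is walked left to right, and whenever a parameter pack is reached the walk descends into it so callers see one flat stream of arguments. A standalone sketch with toy types (not the clang iterator):

    #include <iostream>
    #include <string>
    #include <variant>
    #include <vector>

    // A toy argument is either a plain value or a pack of values.
    using Arg = std::variant<std::string, std::vector<std::string>>;

    // Expand packs in place, the way the iterator walks into them.
    static std::vector<std::string> flatten(const std::vector<Arg> &Args) {
      std::vector<std::string> Out;
      for (const Arg &A : Args) {
        if (const auto *S = std::get_if<std::string>(&A))
          Out.push_back(*S);
        else
          for (const std::string &E : std::get<std::vector<std::string>>(A))
            Out.push_back(E);
      }
      return Out;
    }

    int main() {
      std::vector<Arg> Args = {std::string("int"),
                               std::vector<std::string>{"char", "bool"},
                               std::string("double")};
      for (const std::string &A : flatten(Args))
        std::cout << A << '\n'; // int, char, bool, double
    }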
- TemplateArgument TA = TST->getArg(Index); + TemplateArgument TA = TST->template_arguments()[Index]; if (TA.getKind() != TemplateArgument::Pack) break; @@ -1019,7 +1051,7 @@ class TemplateDiff { assert(TST && "InternalIterator is invalid with a null TST."); assert(!isEnd() && "Index exceeds number of arguments."); if (CurrentTA == EndTA) - return TST->getArg(Index); + return TST->template_arguments()[Index]; else return *CurrentTA; } @@ -1088,6 +1120,9 @@ class TemplateDiff { Ty->getAs<TemplateSpecializationType>()) return TST; + if (const auto* SubstType = Ty->getAs<SubstTemplateTypeParmType>()) + Ty = SubstType->getReplacementType(); + const RecordType *RT = Ty->getAs<RecordType>(); if (!RT) @@ -1180,46 +1215,19 @@ class TemplateDiff { bool &NeedAddressOf) { if (!Iter.isEnd()) { switch (Iter->getKind()) { - default: - llvm_unreachable("unknown ArgumentKind"); - case TemplateArgument::Integral: - Value = Iter->getAsIntegral(); - HasInt = true; - IntType = Iter->getIntegralType(); - return; - case TemplateArgument::Declaration: { - VD = Iter->getAsDecl(); - QualType ArgType = Iter->getParamTypeForDecl(); - QualType VDType = VD->getType(); - if (ArgType->isPointerType() && - Context.hasSameType(ArgType->getPointeeType(), VDType)) - NeedAddressOf = true; - return; - } - case TemplateArgument::NullPtr: - IsNullPtr = true; - return; - case TemplateArgument::Expression: - E = Iter->getAsExpr(); - } - } else if (!Default->isParameterPack()) { - E = Default->getDefaultArgument(); - } - - if (!Iter.hasDesugaredTA()) return; - - const TemplateArgument& TA = Iter.getDesugaredTA(); - switch (TA.getKind()) { - default: - llvm_unreachable("unknown ArgumentKind"); + case TemplateArgument::StructuralValue: + // FIXME: Diffing of structural values is not implemented. + // There is no possible fallback in this case, this will show up + // as '(no argument)'. + return; case TemplateArgument::Integral: - Value = TA.getAsIntegral(); + Value = Iter->getAsIntegral(); HasInt = true; - IntType = TA.getIntegralType(); + IntType = Iter->getIntegralType(); return; case TemplateArgument::Declaration: { - VD = TA.getAsDecl(); - QualType ArgType = TA.getParamTypeForDecl(); + VD = Iter->getAsDecl(); + QualType ArgType = Iter->getParamTypeForDecl(); QualType VDType = VD->getType(); if (ArgType->isPointerType() && Context.hasSameType(ArgType->getPointeeType(), VDType)) @@ -1230,13 +1238,62 @@ class TemplateDiff { IsNullPtr = true; return; case TemplateArgument::Expression: - // TODO: Sometimes, the desugared template argument Expr differs from - // the sugared template argument Expr. It may be useful in the future - // but for now, it is just discarded. - if (!E) - E = TA.getAsExpr(); - return; + E = Iter->getAsExpr(); + break; + case TemplateArgument::Null: + case TemplateArgument::Type: + case TemplateArgument::Template: + case TemplateArgument::TemplateExpansion: + llvm_unreachable("TemplateArgument kind is not expected for NTTP"); + case TemplateArgument::Pack: + llvm_unreachable("TemplateArgument kind should be handled elsewhere"); + } + } else if (!Default->isParameterPack()) { + E = Default->getDefaultArgument().getArgument().getAsExpr(); + } + + if (!Iter.hasDesugaredTA()) + return; + + const TemplateArgument &TA = Iter.getDesugaredTA(); + switch (TA.getKind()) { + case TemplateArgument::StructuralValue: + // FIXME: Diffing of structural values is not implemented. + // Just fall back to the expression. 
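To make the switch above concrete, a standalone sketch (toy variant, hypothetical names) of how one non-type template argument is rendered: integrals, declarations, null pointers and expressions each have a printable form, while structural values are not diffed and surface as "(no argument)".

    #include <iostream>
    #include <string>
    #include <variant>

    struct Integral { long Value; };
    struct Declaration { std::string Name; };
    struct NullPtr {};
    struct Expression { std::string Text; };
    struct StructuralValue {};
    using TemplateArg =
        std::variant<Integral, Declaration, NullPtr, Expression, StructuralValue>;

    // Render one argument the way the template diff does.
    static std::string describe(const TemplateArg &TA) {
      if (const auto *I = std::get_if<Integral>(&TA))
        return std::to_string(I->Value);
      if (const auto *D = std::get_if<Declaration>(&TA))
        return D->Name;
      if (std::get_if<NullPtr>(&TA))
        return "nullptr";
      if (const auto *E = std::get_if<Expression>(&TA))
        return E->Text;
      return "(no argument)"; // structural values: diffing not implemented
    }

    int main() {
      std::cout << describe(Integral{42}) << '\n';      // 42
      std::cout << describe(StructuralValue{}) << '\n'; // (no argument)
    }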
+ return; + case TemplateArgument::Integral: + Value = TA.getAsIntegral(); + HasInt = true; + IntType = TA.getIntegralType(); + return; + case TemplateArgument::Declaration: { + VD = TA.getAsDecl(); + QualType ArgType = TA.getParamTypeForDecl(); + QualType VDType = VD->getType(); + if (ArgType->isPointerType() && + Context.hasSameType(ArgType->getPointeeType(), VDType)) + NeedAddressOf = true; + return; } + case TemplateArgument::NullPtr: + IsNullPtr = true; + return; + case TemplateArgument::Expression: + // TODO: Sometimes, the desugared template argument Expr differs from + // the sugared template argument Expr. It may be useful in the future + // but for now, it is just discarded. + if (!E) + E = TA.getAsExpr(); + return; + case TemplateArgument::Null: + case TemplateArgument::Type: + case TemplateArgument::Template: + case TemplateArgument::TemplateExpansion: + llvm_unreachable("TemplateArgument kind is not expected for NTTP"); + case TemplateArgument::Pack: + llvm_unreachable("TemplateArgument kind should be handled elsewhere"); + } + llvm_unreachable("Unexpected TemplateArgument kind"); } /// DiffNonTypes - Handles any template parameters not handled by DiffTypes @@ -1649,9 +1706,24 @@ class TemplateDiff { : FromType.getAsString(Policy); std::string ToTypeStr = ToType.isNull() ? "(no argument)" : ToType.getAsString(Policy); - // Switch to canonical typename if it is better. + // Print without ElaboratedType sugar if it is better. // TODO: merge this with other aka printing above. if (FromTypeStr == ToTypeStr) { + const auto *FromElTy = dyn_cast<ElaboratedType>(FromType), + *ToElTy = dyn_cast<ElaboratedType>(ToType); + if (FromElTy || ToElTy) { + std::string FromNamedTypeStr = + FromElTy ? FromElTy->getNamedType().getAsString(Policy) + : FromTypeStr; + std::string ToNamedTypeStr = + ToElTy ? ToElTy->getNamedType().getAsString(Policy) : ToTypeStr; + if (FromNamedTypeStr != ToNamedTypeStr) { + FromTypeStr = FromNamedTypeStr; + ToTypeStr = ToNamedTypeStr; + goto PrintTypes; + } + } + // Switch to canonical typename if it is better. std::string FromCanTypeStr = FromType.getCanonicalType().getAsString(Policy); std::string ToCanTypeStr = ToType.getCanonicalType().getAsString(Policy); @@ -1661,6 +1733,7 @@ class TemplateDiff { } } + PrintTypes: if (PrintTree) OS << '['; OS << (FromDefault ? "(default) " : ""); Bold(); @@ -1839,10 +1912,11 @@ class TemplateDiff { // FIXME: Diffing the APValue would be neat. // FIXME: Suppress this and use the full name of the declaration if the // parameter is a pointer or reference. 
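The tie-breaking added above (drop ElaboratedType sugar, then fall back to canonical types) follows a simple pattern worth spelling out: when two arguments print identically, try progressively less-sugared spellings until the strings differ. A minimal sketch with placeholder strings, not the clang printing policy:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    // Each vector holds spellings of one type, from most to least sugared.
    static std::pair<std::string, std::string>
    pickDistinctSpelling(const std::vector<std::string> &From,
                         const std::vector<std::string> &To) {
      for (size_t I = 0; I < From.size() && I < To.size(); ++I)
        if (From[I] != To[I])
          return {From[I], To[I]};
      return {From.back(), To.back()}; // give up: show the last (canonical) form
    }

    int main() {
      // Both arguments print as "X" with full sugar; the less-sugared forms
      // reveal that they come from different scopes (placeholder spellings).
      auto R = pickDistinctSpelling({"X", "N::X", "N::X"},
                                    {"X", "M::X", "M::X"});
      std::cout << "[" << R.first << " != " << R.second << "]\n";
    }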
- TPO->printAsInit(OS); + TPO->getType().getUnqualifiedType().print(OS, Policy); + TPO->printAsInit(OS, Policy); return; } - VD->printName(OS); + VD->printName(OS, Policy); return; } @@ -1862,6 +1936,11 @@ class TemplateDiff { return; } + if (E) { + PrintExpr(E); + return; + } + OS << "(no argument)"; } diff --git a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp index 3d368a0a7b63..864d0393f9a7 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp @@ -12,16 +12,44 @@ //===----------------------------------------------------------------------===// #include "clang/AST/ASTDumper.h" +#include "clang/AST/ASTConcept.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclLookups.h" #include "clang/AST/JSONNodeDumper.h" #include "clang/Basic/Builtins.h" -#include "clang/Basic/Module.h" #include "clang/Basic/SourceManager.h" #include "llvm/Support/raw_ostream.h" + using namespace clang; using namespace clang::comments; +void ASTDumper::dumpInvalidDeclContext(const DeclContext *DC) { + NodeDumper.AddChild([=] { + if (!DC) { + ColorScope Color(OS, ShowColors, NullColor); + OS << "<<<NULL>>>"; + return; + } + // An invalid DeclContext is one for which a dyn_cast() from a DeclContext + // pointer to a Decl pointer would fail an assertion or otherwise fall prey + // to undefined behavior as a result of an invalid associated DeclKind. + // Such invalidity is not supposed to happen of course, but, when it does, + // the information provided below is intended to provide some hints about + // what might have gone awry. + { + ColorScope Color(OS, ShowColors, DeclKindNameColor); + OS << "DeclContext"; + } + NodeDumper.dumpPointer(DC); + OS << " <"; + { + ColorScope Color(OS, ShowColors, DeclNameColor); + OS << "unrecognized Decl kind " << (unsigned)DC->getDeclKind(); + } + OS << ">"; + }); +} + void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) { NodeDumper.AddChild([=] { OS << "StoredDeclsMap "; @@ -90,21 +118,13 @@ void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D, // FIXME: The redecls() range sometimes has elements of a less-specific // type. (In particular, ClassTemplateSpecializationDecl::redecls() gives // us TagDecls, and should give CXXRecordDecls). - auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType); - if (!Redecl) { - // Found the injected-class-name for a class template. This will be dumped - // as part of its surrounding class so we don't need to dump it here. 
- assert(isa<CXXRecordDecl>(RedeclWithBadType) && - "expected an injected-class-name"); - continue; - } - + auto *Redecl = cast<SpecializationDecl>(RedeclWithBadType); switch (Redecl->getTemplateSpecializationKind()) { case TSK_ExplicitInstantiationDeclaration: case TSK_ExplicitInstantiationDefinition: if (!DumpExplicitInst) break; - LLVM_FALLTHROUGH; + [[fallthrough]]; case TSK_Undeclared: case TSK_ImplicitInstantiation: if (DumpRefOnly) @@ -180,6 +200,19 @@ LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS, } //===----------------------------------------------------------------------===// +// TypeLoc method implementations +//===----------------------------------------------------------------------===// + +LLVM_DUMP_METHOD void TypeLoc::dump() const { + ASTDumper(llvm::errs(), /*ShowColors=*/false).Visit(*this); +} + +LLVM_DUMP_METHOD void TypeLoc::dump(llvm::raw_ostream &OS, + const ASTContext &Context) const { + ASTDumper(OS, Context, Context.getDiagnostics().getShowColors()).Visit(*this); +} + +//===----------------------------------------------------------------------===// // Decl method implementations //===----------------------------------------------------------------------===// @@ -208,6 +241,31 @@ LLVM_DUMP_METHOD void Decl::dumpColor() const { P.Visit(this); } +LLVM_DUMP_METHOD void DeclContext::dumpAsDecl() const { + dumpAsDecl(nullptr); +} + +LLVM_DUMP_METHOD void DeclContext::dumpAsDecl(const ASTContext *Ctx) const { + // By design, DeclContext is required to be a base class of some class that + // derives from Decl. Thus, it should always be possible to dyn_cast() from + // a DeclContext pointer to a Decl pointer and Decl::castFromDeclContext() + // asserts that to be the case. Since this function is intended for use in a + // debugger, it performs an additional check in order to prevent a failed + // cast and assertion. If that check fails, then the (invalid) DeclContext + // is dumped with an indication of its invalidity. + if (hasValidDeclKind()) { + const auto *D = cast<Decl>(this); + D->dump(); + } else { + // If an ASTContext is not available, a less capable ASTDumper is + // constructed for which color diagnostics are, regrettably, disabled. + ASTDumper P = Ctx ? 
ASTDumper(llvm::errs(), *Ctx, + Ctx->getDiagnostics().getShowColors()) + : ASTDumper(llvm::errs(), /*ShowColors*/ false); + P.dumpInvalidDeclContext(this); + } +} + LLVM_DUMP_METHOD void DeclContext::dumpLookups() const { dumpLookups(llvm::errs()); } @@ -284,7 +342,54 @@ LLVM_DUMP_METHOD void APValue::dump() const { LLVM_DUMP_METHOD void APValue::dump(raw_ostream &OS, const ASTContext &Context) const { - ASTDumper Dumper(llvm::errs(), Context, - Context.getDiagnostics().getShowColors()); + ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors()); Dumper.Visit(*this, /*Ty=*/Context.getPointerType(Context.CharTy)); } + +//===----------------------------------------------------------------------===// +// ConceptReference method implementations +//===----------------------------------------------------------------------===// + +LLVM_DUMP_METHOD void ConceptReference::dump() const { + dump(llvm::errs()); +} + +LLVM_DUMP_METHOD void ConceptReference::dump(raw_ostream &OS) const { + auto &Ctx = getNamedConcept()->getASTContext(); + ASTDumper P(OS, Ctx, Ctx.getDiagnostics().getShowColors()); + P.Visit(this); +} + +//===----------------------------------------------------------------------===// +// TemplateName method implementations +//===----------------------------------------------------------------------===// + +// FIXME: These are actually using the TemplateArgument dumper, through +// an implicit conversion. The dump will claim this is a template argument, +// which is misleading. + +LLVM_DUMP_METHOD void TemplateName::dump() const { + ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false); + Dumper.Visit(*this); +} + +LLVM_DUMP_METHOD void TemplateName::dump(llvm::raw_ostream &OS, + const ASTContext &Context) const { + ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors()); + Dumper.Visit(*this); +} + +//===----------------------------------------------------------------------===// +// TemplateArgument method implementations +//===----------------------------------------------------------------------===// + +LLVM_DUMP_METHOD void TemplateArgument::dump() const { + ASTDumper Dumper(llvm::errs(), /*ShowColors=*/false); + Dumper.Visit(*this); +} + +LLVM_DUMP_METHOD void TemplateArgument::dump(llvm::raw_ostream &OS, + const ASTContext &Context) const { + ASTDumper Dumper(OS, Context, Context.getDiagnostics().getShowColors()); + Dumper.Visit(*this); +} diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp index 787e02029dae..e95992b99f7e 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp @@ -12,9 +12,9 @@ //===----------------------------------------------------------------------===// #include "clang/AST/ASTImporter.h" -#include "clang/AST/ASTImporterSharedState.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" +#include "clang/AST/ASTImporterSharedState.h" #include "clang/AST/ASTStructuralEquivalence.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" @@ -56,10 +56,8 @@ #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/None.h" -#include "llvm/ADT/Optional.h" -#include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -68,6 +66,7 @@ #include <cassert> #include <cstddef> #include <memory> +#include 
<optional> #include <type_traits> #include <utility> @@ -76,6 +75,7 @@ namespace clang { using llvm::make_error; using llvm::Error; using llvm::Expected; + using ExpectedTypePtr = llvm::Expected<const Type *>; using ExpectedType = llvm::Expected<QualType>; using ExpectedStmt = llvm::Expected<Stmt *>; using ExpectedExpr = llvm::Expected<Expr *>; @@ -83,7 +83,7 @@ namespace clang { using ExpectedSLoc = llvm::Expected<SourceLocation>; using ExpectedName = llvm::Expected<DeclarationName>; - std::string ImportError::toString() const { + std::string ASTImportError::toString() const { // FIXME: Improve error texts. switch (Error) { case NameConflict: @@ -97,15 +97,13 @@ namespace clang { return "Invalid error code."; } - void ImportError::log(raw_ostream &OS) const { - OS << toString(); - } + void ASTImportError::log(raw_ostream &OS) const { OS << toString(); } - std::error_code ImportError::convertToErrorCode() const { + std::error_code ASTImportError::convertToErrorCode() const { llvm_unreachable("Function not implemented."); } - char ImportError::ID; + char ASTImportError::ID; template <class T> SmallVector<Decl *, 2> @@ -137,6 +135,46 @@ namespace clang { To->setIsUsed(); } + /// How to handle import errors that occur when import of a child declaration + /// of a DeclContext fails. + class ChildErrorHandlingStrategy { + /// This context is imported (in the 'from' domain). + /// It is nullptr if a non-DeclContext is imported. + const DeclContext *const FromDC; + /// Ignore import errors of the children. + /// If true, the context can be imported successfully if a child + /// of it failed to import. Otherwise the import errors of the child nodes + /// are accumulated (joined) into the import error object of the parent. + /// (Import of a parent can fail in other ways.) + bool const IgnoreChildErrors; + + public: + ChildErrorHandlingStrategy(const DeclContext *FromDC) + : FromDC(FromDC), IgnoreChildErrors(!isa<TagDecl>(FromDC)) {} + ChildErrorHandlingStrategy(const Decl *FromD) + : FromDC(dyn_cast<DeclContext>(FromD)), + IgnoreChildErrors(!isa<TagDecl>(FromD)) {} + + /// Process the import result of a child (of the current declaration). + /// \param ResultErr The import error that can be used as result of + /// importing the parent. This may be changed by the function. + /// \param ChildErr Result of importing a child. Can be success or error. + void handleChildImportResult(Error &ResultErr, Error &&ChildErr) { + if (ChildErr && !IgnoreChildErrors) + ResultErr = joinErrors(std::move(ResultErr), std::move(ChildErr)); + else + consumeError(std::move(ChildErr)); + } + + /// Determine if import failure of a child does not cause import failure of + /// its parent. + bool ignoreChildErrorOnParent(Decl *FromChildD) const { + if (!IgnoreChildErrors || !FromDC) + return false; + return FromDC->containsDecl(FromChildD); + } + }; + class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, ExpectedType>, public DeclVisitor<ASTNodeImporter, ExpectedDecl>, public StmtVisitor<ASTNodeImporter, ExpectedStmt> { @@ -144,13 +182,13 @@ namespace clang { // Use this instead of Importer.importInto . template <typename ImportT> - LLVM_NODISCARD Error importInto(ImportT &To, const ImportT &From) { + [[nodiscard]] Error importInto(ImportT &To, const ImportT &From) { return Importer.importInto(To, From); } // Use this to import pointers of specific type. 
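The policy encoded by ChildErrorHandlingStrategy above can be summarized with a small standalone sketch (boolean flags standing in for llvm::Error): for contexts that must be complete, such as a TagDecl, a failed child import poisons the parent; for everything else the child error is consumed and the parent stays usable.

    #include <iostream>

    struct ChildErrorPolicy {
      bool IgnoreChildErrors; // true for most contexts, false for TagDecl-like ones

      // Returns the parent's error state after seeing one child result.
      bool handleChild(bool ParentErr, bool ChildErr) const {
        if (ChildErr && !IgnoreChildErrors)
          return true;      // accumulate the child failure into the parent
        return ParentErr;   // otherwise the child error is consumed
      }
    };

    int main() {
      ChildErrorPolicy Namespace{/*IgnoreChildErrors=*/true};
      ChildErrorPolicy Struct{/*IgnoreChildErrors=*/false};
      bool ChildFailed = true;
      std::cout << Namespace.handleChild(false, ChildFailed) << '\n'; // 0: still ok
      std::cout << Struct.handleChild(false, ChildFailed) << '\n';    // 1: poisoned
    }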
template <typename ImportT> - LLVM_NODISCARD Error importInto(ImportT *&To, ImportT *From) { + [[nodiscard]] Error importInto(ImportT *&To, ImportT *From) { auto ToOrErr = Importer.Import(From); if (ToOrErr) To = cast_or_null<ImportT>(*ToOrErr); @@ -160,7 +198,9 @@ namespace clang { // Call the import function of ASTImporter for a baseclass of type `T` and // cast the return value to `T`. template <typename T> - Expected<T *> import(T *From) { + auto import(T *From) + -> std::conditional_t<std::is_base_of_v<Type, T>, Expected<const T *>, + Expected<T *>> { auto ToOrErr = Importer.Import(From); if (!ToOrErr) return ToOrErr.takeError(); @@ -168,7 +208,7 @@ namespace clang { } template <typename T> - Expected<T *> import(const T *From) { + auto import(const T *From) { return import(const_cast<T *>(From)); } @@ -178,30 +218,14 @@ namespace clang { return Importer.Import(From); } - // Import an Optional<T> by importing the contained T, if any. - template<typename T> - Expected<Optional<T>> import(Optional<T> From) { + // Import an std::optional<T> by importing the contained T, if any. + template <typename T> + Expected<std::optional<T>> import(std::optional<T> From) { if (!From) - return Optional<T>(); + return std::nullopt; return import(*From); } - // Helper for chaining together multiple imports. If an error is detected, - // subsequent imports will return default constructed nodes, so that failure - // can be detected with a single conditional branch after a sequence of - // imports. - template <typename T> T importChecked(Error &Err, const T &From) { - // Don't attempt to import nodes if we hit an error earlier. - if (Err) - return T{}; - Expected<T> MaybeVal = import(From); - if (!MaybeVal) { - Err = MaybeVal.takeError(); - return T{}; - } - return *MaybeVal; - } - ExplicitSpecifier importExplicitSpecifier(Error &Err, ExplicitSpecifier ESpec); @@ -219,8 +243,8 @@ namespace clang { // then to the already imported Decl. Returns a bool value set to true if // the `FromD` had been imported before. template <typename ToDeclT, typename FromDeclT, typename... Args> - LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD, - Args &&... args) { + [[nodiscard]] bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD, + Args &&...args) { // There may be several overloads of ToDeclT::Create. We must make sure // to call the one which would be chosen by the arguments, thus we use a // wrapper for the overload set. @@ -235,8 +259,8 @@ namespace clang { // GetImportedOrCreateDecl<TypeAliasDecl>(ToTypedef, FromD, ...); template <typename NewDeclT, typename ToDeclT, typename FromDeclT, typename... Args> - LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD, - Args &&... args) { + [[nodiscard]] bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD, + Args &&...args) { CallOverloadedCreateFun<NewDeclT> OC; return GetImportedOrCreateSpecialDecl(ToD, OC, FromD, std::forward<Args>(args)...); @@ -245,9 +269,9 @@ namespace clang { // used, e.g. CXXRecordDecl::CreateLambda . template <typename ToDeclT, typename CreateFunT, typename FromDeclT, typename... Args> - LLVM_NODISCARD bool + [[nodiscard]] bool GetImportedOrCreateSpecialDecl(ToDeclT *&ToD, CreateFunT CreateFun, - FromDeclT *FromD, Args &&... args) { + FromDeclT *FromD, Args &&...args) { if (Importer.getImportDeclErrorIfAny(FromD)) { ToD = nullptr; return true; // Already imported but with error. 
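A minimal sketch of the contract behind GetImportedOrCreateDecl above (a std::map in place of the importer's real bookkeeping, hypothetical names): true means the source node was seen before, with the destination possibly null if that earlier import failed; false means a fresh node was just created and registered.

    #include <iostream>
    #include <map>
    #include <string>

    struct Importer {
      std::map<std::string, std::string> Imported; // from-name -> to-name
      std::map<std::string, bool> ImportFailed;    // from-name -> had error

      bool getImportedOrCreate(std::string &ToD, const std::string &FromD) {
        if (ImportFailed.count(FromD)) {
          ToD.clear();
          return true;               // already imported, but with error
        }
        auto It = Imported.find(FromD);
        if (It != Imported.end()) {
          ToD = It->second;
          return true;               // already imported successfully
        }
        ToD = FromD + "'";           // "create" the destination node
        Imported[FromD] = ToD;       // keep track of imported decls
        return false;                // a new node was created
      }
    };

    int main() {
      Importer Imp;
      std::string To;
      bool Known = Imp.getImportedOrCreate(To, "f");
      std::cout << Known << " " << To << '\n'; // 0 f'  (newly created)
      Known = Imp.getImportedOrCreate(To, "f");
      std::cout << Known << " " << To << '\n'; // 1 f'  (found in the cache)
    }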
@@ -258,6 +282,7 @@ namespace clang { ToD = CreateFun(std::forward<Args>(args)...); // Keep track of imported Decls. Importer.RegisterImportedDecl(FromD, ToD); + Importer.SharedState->markAsNewDecl(ToD); InitializeImportedDecl(FromD, ToD); return false; // A new Decl is created. } @@ -313,11 +338,8 @@ namespace clang { auto *ToNamed = cast<NamedDecl>(ToD); DeclContextLookupResult FromLookup = FromDC->lookup(FromNamed->getDeclName()); - for (NamedDecl *ND : FromLookup) - if (ND == FromNamed) { - ToDC->makeDeclVisibleInContext(ToNamed); - break; - } + if (llvm::is_contained(FromLookup, FromNamed)) + ToDC->makeDeclVisibleInContext(ToNamed); } } } @@ -346,53 +368,9 @@ namespace clang { // Importing types ExpectedType VisitType(const Type *T); - ExpectedType VisitAtomicType(const AtomicType *T); - ExpectedType VisitBuiltinType(const BuiltinType *T); - ExpectedType VisitDecayedType(const DecayedType *T); - ExpectedType VisitComplexType(const ComplexType *T); - ExpectedType VisitPointerType(const PointerType *T); - ExpectedType VisitBlockPointerType(const BlockPointerType *T); - ExpectedType VisitLValueReferenceType(const LValueReferenceType *T); - ExpectedType VisitRValueReferenceType(const RValueReferenceType *T); - ExpectedType VisitMemberPointerType(const MemberPointerType *T); - ExpectedType VisitConstantArrayType(const ConstantArrayType *T); - ExpectedType VisitIncompleteArrayType(const IncompleteArrayType *T); - ExpectedType VisitVariableArrayType(const VariableArrayType *T); - ExpectedType VisitDependentSizedArrayType(const DependentSizedArrayType *T); - // FIXME: DependentSizedExtVectorType - ExpectedType VisitVectorType(const VectorType *T); - ExpectedType VisitExtVectorType(const ExtVectorType *T); - ExpectedType VisitFunctionNoProtoType(const FunctionNoProtoType *T); - ExpectedType VisitFunctionProtoType(const FunctionProtoType *T); - ExpectedType VisitUnresolvedUsingType(const UnresolvedUsingType *T); - ExpectedType VisitParenType(const ParenType *T); - ExpectedType VisitTypedefType(const TypedefType *T); - ExpectedType VisitTypeOfExprType(const TypeOfExprType *T); - // FIXME: DependentTypeOfExprType - ExpectedType VisitTypeOfType(const TypeOfType *T); - ExpectedType VisitDecltypeType(const DecltypeType *T); - ExpectedType VisitUnaryTransformType(const UnaryTransformType *T); - ExpectedType VisitAutoType(const AutoType *T); - ExpectedType VisitDeducedTemplateSpecializationType( - const DeducedTemplateSpecializationType *T); - ExpectedType VisitInjectedClassNameType(const InjectedClassNameType *T); - // FIXME: DependentDecltypeType - ExpectedType VisitRecordType(const RecordType *T); - ExpectedType VisitEnumType(const EnumType *T); - ExpectedType VisitAttributedType(const AttributedType *T); - ExpectedType VisitTemplateTypeParmType(const TemplateTypeParmType *T); - ExpectedType VisitSubstTemplateTypeParmType( - const SubstTemplateTypeParmType *T); - ExpectedType VisitTemplateSpecializationType( - const TemplateSpecializationType *T); - ExpectedType VisitElaboratedType(const ElaboratedType *T); - ExpectedType VisitDependentNameType(const DependentNameType *T); - ExpectedType VisitPackExpansionType(const PackExpansionType *T); - ExpectedType VisitDependentTemplateSpecializationType( - const DependentTemplateSpecializationType *T); - ExpectedType VisitObjCInterfaceType(const ObjCInterfaceType *T); - ExpectedType VisitObjCObjectType(const ObjCObjectType *T); - ExpectedType VisitObjCObjectPointerType(const ObjCObjectPointerType *T); +#define TYPE(Class, Base) \ + ExpectedType 
Visit##Class##Type(const Class##Type *T); +#include "clang/AST/TypeNodes.inc" // Importing declarations Error ImportDeclParts(NamedDecl *D, DeclarationName &Name, NamedDecl *&ToD, @@ -408,6 +386,7 @@ namespace clang { Decl *From, DeclContext *&ToDC, DeclContext *&ToLexicalDC); Error ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To); + Error ImportFieldDeclDefinition(const FieldDecl *From, const FieldDecl *To); Expected<CXXCastPath> ImportCastPath(CastExpr *E); Expected<APValue> ImportAPValue(const APValue &FromValue); @@ -444,9 +423,8 @@ namespace clang { Error ImportDefinition( ObjCProtocolDecl *From, ObjCProtocolDecl *To, ImportDefinitionKind Kind = IDK_Default); - Error ImportTemplateArguments( - const TemplateArgument *FromArgs, unsigned NumFromArgs, - SmallVectorImpl<TemplateArgument> &ToArgs); + Error ImportTemplateArguments(ArrayRef<TemplateArgument> FromArgs, + SmallVectorImpl<TemplateArgument> &ToArgs); Expected<TemplateArgument> ImportTemplateArgument(const TemplateArgument &From); @@ -465,8 +443,9 @@ namespace clang { Expected<FunctionTemplateAndArgsTy> ImportFunctionTemplateWithTemplateArgsFromSpecialization( FunctionDecl *FromFD); - Error ImportTemplateParameterLists(const DeclaratorDecl *FromD, - DeclaratorDecl *ToD); + + template <typename DeclTy> + Error ImportTemplateParameterLists(const DeclTy *FromD, DeclTy *ToD); Error ImportTemplateInformation(FunctionDecl *FromFD, FunctionDecl *ToFD); @@ -475,21 +454,14 @@ namespace clang { Error ImportDefaultArgOfParmVarDecl(const ParmVarDecl *FromParam, ParmVarDecl *ToParam); + Expected<InheritedConstructor> + ImportInheritedConstructor(const InheritedConstructor &From); + template <typename T> bool hasSameVisibilityContextAndLinkage(T *Found, T *From); - bool IsStructuralMatch(Decl *From, Decl *To, bool Complain); - bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord, - bool Complain = true); - bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar, - bool Complain = true); - bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord); - bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC); - bool IsStructuralMatch(FunctionTemplateDecl *From, - FunctionTemplateDecl *To); - bool IsStructuralMatch(FunctionDecl *From, FunctionDecl *To); - bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To); - bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To); + bool IsStructuralMatch(Decl *From, Decl *To, bool Complain = true, + bool IgnoreTemplateParmDepth = false); ExpectedDecl VisitDecl(Decl *D); ExpectedDecl VisitImportDecl(ImportDecl *D); ExpectedDecl VisitEmptyDecl(EmptyDecl *D); @@ -528,6 +500,7 @@ namespace clang { ExpectedDecl VisitUsingDecl(UsingDecl *D); ExpectedDecl VisitUsingShadowDecl(UsingShadowDecl *D); ExpectedDecl VisitUsingDirectiveDecl(UsingDirectiveDecl *D); + ExpectedDecl VisitUsingPackDecl(UsingPackDecl *D); ExpectedDecl ImportUsingShadowDecls(BaseUsingDecl *D, BaseUsingDecl *ToSI); ExpectedDecl VisitUsingEnumDecl(UsingEnumDecl *D); ExpectedDecl VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D); @@ -597,6 +570,8 @@ namespace clang { ExpectedStmt VisitSourceLocExpr(SourceLocExpr *E); ExpectedStmt VisitVAArgExpr(VAArgExpr *E); ExpectedStmt VisitChooseExpr(ChooseExpr *E); + ExpectedStmt VisitConvertVectorExpr(ConvertVectorExpr *E); + ExpectedStmt VisitShuffleVectorExpr(ShuffleVectorExpr *E); ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E); ExpectedStmt VisitGenericSelectionExpr(GenericSelectionExpr *E); ExpectedStmt 
VisitPredefinedExpr(PredefinedExpr *E); @@ -622,6 +597,7 @@ namespace clang { ExpectedStmt VisitBinaryOperator(BinaryOperator *E); ExpectedStmt VisitConditionalOperator(ConditionalOperator *E); ExpectedStmt VisitBinaryConditionalOperator(BinaryConditionalOperator *E); + ExpectedStmt VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E); ExpectedStmt VisitOpaqueValueExpr(OpaqueValueExpr *E); ExpectedStmt VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E); ExpectedStmt VisitExpressionTraitExpr(ExpressionTraitExpr *E); @@ -667,6 +643,22 @@ namespace clang { ExpectedStmt VisitCXXTypeidExpr(CXXTypeidExpr *E); ExpectedStmt VisitCXXFoldExpr(CXXFoldExpr *E); + // Helper for chaining together multiple imports. If an error is detected, + // subsequent imports will return default constructed nodes, so that failure + // can be detected with a single conditional branch after a sequence of + // imports. + template <typename T> T importChecked(Error &Err, const T &From) { + // Don't attempt to import nodes if we hit an error earlier. + if (Err) + return T{}; + Expected<T> MaybeVal = import(From); + if (!MaybeVal) { + Err = MaybeVal.takeError(); + return T{}; + } + return *MaybeVal; + } + template<typename IIter, typename OIter> Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) { using ItemT = std::remove_reference_t<decltype(*Obegin)>; @@ -704,7 +696,7 @@ namespace clang { // Returns true if the given function has a placeholder return type and // that type is declared inside the body of the function. // E.g. auto f() { struct X{}; return X(); } - bool hasAutoReturnTypeDeclaredInside(FunctionDecl *D); + bool hasReturnTypeDeclaredInside(FunctionDecl *D); }; template <typename InContainerTy> @@ -754,9 +746,8 @@ ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization( return std::move(Err); // Import template arguments. 
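The importChecked helper above enables a pattern that is easy to miss in the diff: a run of imports shares one error slot, later calls become no-ops once it is set, and the error is tested once at the end. A standalone sketch with a bool flag and std::optional standing in for llvm::Error and Expected:

    #include <iostream>
    #include <optional>
    #include <string>

    // Don't attempt further work after a failure; record the first failure.
    template <typename T>
    static T importChecked(bool &Err, const std::optional<T> &From) {
      if (Err)
        return T{};
      if (!From) {
        Err = true;
        return T{};
      }
      return *From;
    }

    int main() {
      bool Err = false;
      std::string A =
          importChecked<std::string>(Err, std::optional<std::string>("loc"));
      std::string B = importChecked<std::string>(Err, std::nullopt); // fails
      std::string C =
          importChecked<std::string>(Err, std::optional<std::string>("name"));
      if (Err) // single check after the whole chain
        std::cout << "import failed; partial results: '" << A << "' '" << B
                  << "' '" << C << "'\n";
    }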
- auto TemplArgs = FTSInfo->TemplateArguments->asArray(); - if (Error Err = ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(), - std::get<1>(Result))) + if (Error Err = ImportTemplateArguments(FTSInfo->TemplateArguments->asArray(), + std::get<1>(Result))) return std::move(Err); return Result; @@ -803,7 +794,8 @@ ASTNodeImporter::import(const TemplateArgument &From) { ExpectedType ToTypeOrErr = import(From.getAsType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); - return TemplateArgument(*ToTypeOrErr); + return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/ false, + From.getIsDefaulted()); } case TemplateArgument::Integral: { @@ -820,14 +812,27 @@ ASTNodeImporter::import(const TemplateArgument &From) { ExpectedType ToTypeOrErr = import(From.getParamTypeForDecl()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); - return TemplateArgument(*ToOrErr, *ToTypeOrErr); + return TemplateArgument(dyn_cast<ValueDecl>((*ToOrErr)->getCanonicalDecl()), + *ToTypeOrErr, From.getIsDefaulted()); } case TemplateArgument::NullPtr: { ExpectedType ToTypeOrErr = import(From.getNullPtrType()); if (!ToTypeOrErr) return ToTypeOrErr.takeError(); - return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/true); + return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/ true, + From.getIsDefaulted()); + } + + case TemplateArgument::StructuralValue: { + ExpectedType ToTypeOrErr = import(From.getStructuralValueType()); + if (!ToTypeOrErr) + return ToTypeOrErr.takeError(); + Expected<APValue> ToValueOrErr = import(From.getAsStructuralValue()); + if (!ToValueOrErr) + return ToValueOrErr.takeError(); + return TemplateArgument(Importer.getToContext(), *ToTypeOrErr, + *ToValueOrErr); } case TemplateArgument::Template: { @@ -835,7 +840,7 @@ ASTNodeImporter::import(const TemplateArgument &From) { if (!ToTemplateOrErr) return ToTemplateOrErr.takeError(); - return TemplateArgument(*ToTemplateOrErr); + return TemplateArgument(*ToTemplateOrErr, From.getIsDefaulted()); } case TemplateArgument::TemplateExpansion: { @@ -844,25 +849,24 @@ ASTNodeImporter::import(const TemplateArgument &From) { if (!ToTemplateOrErr) return ToTemplateOrErr.takeError(); - return TemplateArgument( - *ToTemplateOrErr, From.getNumTemplateExpansions()); + return TemplateArgument(*ToTemplateOrErr, From.getNumTemplateExpansions(), + From.getIsDefaulted()); } case TemplateArgument::Expression: if (ExpectedExpr ToExpr = import(From.getAsExpr())) - return TemplateArgument(*ToExpr); + return TemplateArgument(*ToExpr, From.getIsDefaulted()); else return ToExpr.takeError(); case TemplateArgument::Pack: { SmallVector<TemplateArgument, 2> ToPack; ToPack.reserve(From.pack_size()); - if (Error Err = ImportTemplateArguments( - From.pack_begin(), From.pack_size(), ToPack)) + if (Error Err = ImportTemplateArguments(From.pack_elements(), ToPack)) return std::move(Err); return TemplateArgument( - llvm::makeArrayRef(ToPack).copy(Importer.getToContext())); + llvm::ArrayRef(ToPack).copy(Importer.getToContext())); } } @@ -942,7 +946,8 @@ ASTNodeImporter::import(const Designator &D) { if (!ToFieldLocOrErr) return ToFieldLocOrErr.takeError(); - return Designator(ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr); + return DesignatedInitExpr::Designator::CreateFieldDesignator( + ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr); } ExpectedSLoc ToLBracketLocOrErr = import(D.getLBracketLoc()); @@ -954,22 +959,50 @@ ASTNodeImporter::import(const Designator &D) { return ToRBracketLocOrErr.takeError(); if (D.isArrayDesignator()) - return Designator(D.getFirstExprIndex(), - 
*ToLBracketLocOrErr, *ToRBracketLocOrErr); + return Designator::CreateArrayDesignator(D.getArrayIndex(), + *ToLBracketLocOrErr, + *ToRBracketLocOrErr); ExpectedSLoc ToEllipsisLocOrErr = import(D.getEllipsisLoc()); if (!ToEllipsisLocOrErr) return ToEllipsisLocOrErr.takeError(); assert(D.isArrayRangeDesignator()); - return Designator( - D.getFirstExprIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr, + return Designator::CreateArrayRangeDesignator( + D.getArrayIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr, *ToRBracketLocOrErr); } template <> +Expected<ConceptReference *> ASTNodeImporter::import(ConceptReference *From) { + Error Err = Error::success(); + auto ToNNS = importChecked(Err, From->getNestedNameSpecifierLoc()); + auto ToTemplateKWLoc = importChecked(Err, From->getTemplateKWLoc()); + auto ToConceptNameLoc = + importChecked(Err, From->getConceptNameInfo().getLoc()); + auto ToConceptName = importChecked(Err, From->getConceptNameInfo().getName()); + auto ToFoundDecl = importChecked(Err, From->getFoundDecl()); + auto ToNamedConcept = importChecked(Err, From->getNamedConcept()); + if (Err) + return std::move(Err); + TemplateArgumentListInfo ToTAInfo; + const auto *ASTTemplateArgs = From->getTemplateArgsAsWritten(); + if (ASTTemplateArgs) + if (Error Err = ImportTemplateArgumentListInfo(*ASTTemplateArgs, ToTAInfo)) + return std::move(Err); + auto *ConceptRef = ConceptReference::Create( + Importer.getToContext(), ToNNS, ToTemplateKWLoc, + DeclarationNameInfo(ToConceptName, ToConceptNameLoc), ToFoundDecl, + ToNamedConcept, + ASTTemplateArgs ? ASTTemplateArgumentListInfo::Create( + Importer.getToContext(), ToTAInfo) + : nullptr); + return ConceptRef; +} + +template <> Expected<LambdaCapture> ASTNodeImporter::import(const LambdaCapture &From) { - VarDecl *Var = nullptr; + ValueDecl *Var = nullptr; if (From.capturesVariable()) { if (auto VarOrErr = import(From.getCapturedVar())) Var = *VarOrErr; @@ -1029,7 +1062,7 @@ using namespace clang; ExpectedType ASTNodeImporter::VisitType(const Type *T) { Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node) << T->getTypeClassName(); - return make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } ExpectedType ASTNodeImporter::VisitAtomicType(const AtomicType *T){ @@ -1062,6 +1095,14 @@ ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) { case BuiltinType::Id: \ return Importer.getToContext().SingletonId; #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) \ + case BuiltinType::Id: \ + return Importer.getToContext().SingletonId; +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) \ + case BuiltinType::Id: \ + return Importer.getToContext().SingletonId; +#include "clang/Basic/AMDGPUTypes.def" #define SHARED_SINGLETON_TYPE(Expansion) #define BUILTIN_TYPE(Id, SingletonId) \ case BuiltinType::Id: return Importer.getToContext().SingletonId; @@ -1161,12 +1202,12 @@ ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) { if (!ToPointeeTypeOrErr) return ToPointeeTypeOrErr.takeError(); - ExpectedType ClassTypeOrErr = import(QualType(T->getClass(), 0)); + ExpectedTypePtr ClassTypeOrErr = import(T->getClass()); if (!ClassTypeOrErr) return ClassTypeOrErr.takeError(); - return Importer.getToContext().getMemberPointerType( - *ToPointeeTypeOrErr, (*ClassTypeOrErr).getTypePtr()); + return Importer.getToContext().getMemberPointerType(*ToPointeeTypeOrErr, + 
*ClassTypeOrErr); } ExpectedType @@ -1183,6 +1224,15 @@ ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) { } ExpectedType +ASTNodeImporter::VisitArrayParameterType(const ArrayParameterType *T) { + ExpectedType ToArrayTypeOrErr = VisitConstantArrayType(T); + if (!ToArrayTypeOrErr) + return ToArrayTypeOrErr.takeError(); + + return Importer.getToContext().getArrayParameterType(*ToArrayTypeOrErr); +} + +ExpectedType ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) { ExpectedType ToElementTypeOrErr = import(T->getElementType()); if (!ToElementTypeOrErr) @@ -1222,6 +1272,18 @@ ExpectedType ASTNodeImporter::VisitDependentSizedArrayType( T->getIndexTypeCVRQualifiers(), ToBracketsRange); } +ExpectedType ASTNodeImporter::VisitDependentSizedExtVectorType( + const DependentSizedExtVectorType *T) { + Error Err = Error::success(); + QualType ToElementType = importChecked(Err, T->getElementType()); + Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr()); + SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc()); + if (Err) + return std::move(Err); + return Importer.getToContext().getDependentSizedExtVectorType( + ToElementType, ToSizeExpr, ToAttrLoc); +} + ExpectedType ASTNodeImporter::VisitVectorType(const VectorType *T) { ExpectedType ToElementTypeOrErr = import(T->getElementType()); if (!ToElementTypeOrErr) @@ -1321,28 +1383,58 @@ ExpectedType ASTNodeImporter::VisitParenType(const ParenType *T) { return Importer.getToContext().getParenType(*ToInnerTypeOrErr); } +ExpectedType +ASTNodeImporter::VisitPackIndexingType(clang::PackIndexingType const *T) { + + ExpectedType Pattern = import(T->getPattern()); + if (!Pattern) + return Pattern.takeError(); + ExpectedExpr Index = import(T->getIndexExpr()); + if (!Index) + return Index.takeError(); + return Importer.getToContext().getPackIndexingType(*Pattern, *Index); +} + ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) { Expected<TypedefNameDecl *> ToDeclOrErr = import(T->getDecl()); if (!ToDeclOrErr) return ToDeclOrErr.takeError(); - return Importer.getToContext().getTypeDeclType(*ToDeclOrErr); + TypedefNameDecl *ToDecl = *ToDeclOrErr; + if (ToDecl->getTypeForDecl()) + return QualType(ToDecl->getTypeForDecl(), 0); + + ExpectedType ToUnderlyingTypeOrErr = import(T->desugar()); + if (!ToUnderlyingTypeOrErr) + return ToUnderlyingTypeOrErr.takeError(); + + return Importer.getToContext().getTypedefType(ToDecl, *ToUnderlyingTypeOrErr); } ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) { ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr()); if (!ToExprOrErr) return ToExprOrErr.takeError(); - - return Importer.getToContext().getTypeOfExprType(*ToExprOrErr); + return Importer.getToContext().getTypeOfExprType(*ToExprOrErr, T->getKind()); } ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) { - ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType()); + ExpectedType ToUnderlyingTypeOrErr = import(T->getUnmodifiedType()); if (!ToUnderlyingTypeOrErr) return ToUnderlyingTypeOrErr.takeError(); + return Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr, + T->getKind()); +} + +ExpectedType ASTNodeImporter::VisitUsingType(const UsingType *T) { + Expected<UsingShadowDecl *> FoundOrErr = import(T->getFoundDecl()); + if (!FoundOrErr) + return FoundOrErr.takeError(); + Expected<QualType> UnderlyingOrErr = import(T->getUnderlyingType()); + if (!UnderlyingOrErr) + return UnderlyingOrErr.takeError(); - return 
Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr); + return Importer.getToContext().getUsingType(*FoundOrErr, *UnderlyingOrErr); } ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) { @@ -1384,9 +1476,7 @@ ExpectedType ASTNodeImporter::VisitAutoType(const AutoType *T) { return ToTypeConstraintConcept.takeError(); SmallVector<TemplateArgument, 2> ToTemplateArgs; - ArrayRef<TemplateArgument> FromTemplateArgs = T->getTypeConstraintArguments(); - if (Error Err = ImportTemplateArguments(FromTemplateArgs.data(), - FromTemplateArgs.size(), + if (Error Err = ImportTemplateArguments(T->getTypeConstraintArguments(), ToTemplateArgs)) return std::move(Err); @@ -1416,20 +1506,11 @@ ExpectedType ASTNodeImporter::VisitInjectedClassNameType( if (!ToDeclOrErr) return ToDeclOrErr.takeError(); - ExpectedType ToInjTypeOrErr = import(T->getInjectedSpecializationType()); - if (!ToInjTypeOrErr) - return ToInjTypeOrErr.takeError(); - - // FIXME: ASTContext::getInjectedClassNameType is not suitable for AST reading - // See comments in InjectedClassNameType definition for details - // return Importer.getToContext().getInjectedClassNameType(D, InjType); - enum { - TypeAlignmentInBits = 4, - TypeAlignment = 1 << TypeAlignmentInBits - }; - - return QualType(new (Importer.getToContext(), TypeAlignment) - InjectedClassNameType(*ToDeclOrErr, *ToInjTypeOrErr), 0); + // The InjectedClassNameType is created in VisitRecordDecl when the + // T->getDecl() is imported. Here we can return the existing type. + const Type *Ty = (*ToDeclOrErr)->getTypeForDecl(); + assert(isa_and_nonnull<InjectedClassNameType>(Ty)); + return QualType(Ty, 0); } ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) { @@ -1460,6 +1541,28 @@ ExpectedType ASTNodeImporter::VisitAttributedType(const AttributedType *T) { *ToModifiedTypeOrErr, *ToEquivalentTypeOrErr); } +ExpectedType +ASTNodeImporter::VisitCountAttributedType(const CountAttributedType *T) { + ExpectedType ToWrappedTypeOrErr = import(T->desugar()); + if (!ToWrappedTypeOrErr) + return ToWrappedTypeOrErr.takeError(); + + Error Err = Error::success(); + Expr *CountExpr = importChecked(Err, T->getCountExpr()); + + SmallVector<TypeCoupledDeclRefInfo, 1> CoupledDecls; + for (const TypeCoupledDeclRefInfo &TI : T->dependent_decls()) { + Expected<ValueDecl *> ToDeclOrErr = import(TI.getDecl()); + if (!ToDeclOrErr) + return ToDeclOrErr.takeError(); + CoupledDecls.emplace_back(*ToDeclOrErr, TI.isDeref()); + } + + return Importer.getToContext().getCountAttributedType( + *ToWrappedTypeOrErr, CountExpr, T->isCountInBytes(), T->isOrNull(), + ArrayRef(CoupledDecls.data(), CoupledDecls.size())); +} + ExpectedType ASTNodeImporter::VisitTemplateTypeParmType( const TemplateTypeParmType *T) { Expected<TemplateTypeParmDecl *> ToDeclOrErr = import(T->getDecl()); @@ -1472,18 +1575,31 @@ ExpectedType ASTNodeImporter::VisitTemplateTypeParmType( ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmType( const SubstTemplateTypeParmType *T) { - ExpectedType ReplacedOrErr = import(QualType(T->getReplacedParameter(), 0)); + Expected<Decl *> ReplacedOrErr = import(T->getAssociatedDecl()); if (!ReplacedOrErr) return ReplacedOrErr.takeError(); - const TemplateTypeParmType *Replaced = - cast<TemplateTypeParmType>((*ReplacedOrErr).getTypePtr()); ExpectedType ToReplacementTypeOrErr = import(T->getReplacementType()); if (!ToReplacementTypeOrErr) return ToReplacementTypeOrErr.takeError(); return Importer.getToContext().getSubstTemplateTypeParmType( - Replaced, 
(*ToReplacementTypeOrErr).getCanonicalType()); + *ToReplacementTypeOrErr, *ReplacedOrErr, T->getIndex(), + T->getPackIndex()); +} + +ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmPackType( + const SubstTemplateTypeParmPackType *T) { + Expected<Decl *> ReplacedOrErr = import(T->getAssociatedDecl()); + if (!ReplacedOrErr) + return ReplacedOrErr.takeError(); + + Expected<TemplateArgument> ToArgumentPack = import(T->getArgumentPack()); + if (!ToArgumentPack) + return ToArgumentPack.takeError(); + + return Importer.getToContext().getSubstTemplateTypeParmPackType( + *ReplacedOrErr, T->getIndex(), T->getFinal(), *ToArgumentPack); } ExpectedType ASTNodeImporter::VisitTemplateSpecializationType( @@ -1493,12 +1609,12 @@ ExpectedType ASTNodeImporter::VisitTemplateSpecializationType( return ToTemplateOrErr.takeError(); SmallVector<TemplateArgument, 2> ToTemplateArgs; - if (Error Err = ImportTemplateArguments( - T->getArgs(), T->getNumArgs(), ToTemplateArgs)) + if (Error Err = + ImportTemplateArguments(T->template_arguments(), ToTemplateArgs)) return std::move(Err); QualType ToCanonType; - if (!QualType(T, 0).isCanonical()) { + if (!T->isCanonicalUnqualified()) { QualType FromCanonType = Importer.getFromContext().getCanonicalType(QualType(T, 0)); if (ExpectedType TyOrErr = import(FromCanonType)) @@ -1551,9 +1667,8 @@ ExpectedType ASTNodeImporter::VisitDependentTemplateSpecializationType( IdentifierInfo *ToName = Importer.Import(T->getIdentifier()); SmallVector<TemplateArgument, 2> ToPack; - ToPack.reserve(T->getNumArgs()); - if (Error Err = ImportTemplateArguments( - T->getArgs(), T->getNumArgs(), ToPack)) + ToPack.reserve(T->template_arguments().size()); + if (Error Err = ImportTemplateArguments(T->template_arguments(), ToPack)) return std::move(Err); return Importer.getToContext().getDependentTemplateSpecializationType( @@ -1626,6 +1741,134 @@ ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) { return Importer.getToContext().getObjCObjectPointerType(*ToPointeeTypeOrErr); } +ExpectedType +ASTNodeImporter::VisitMacroQualifiedType(const MacroQualifiedType *T) { + ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType()); + if (!ToUnderlyingTypeOrErr) + return ToUnderlyingTypeOrErr.takeError(); + + IdentifierInfo *ToIdentifier = Importer.Import(T->getMacroIdentifier()); + return Importer.getToContext().getMacroQualifiedType(*ToUnderlyingTypeOrErr, + ToIdentifier); +} + +ExpectedType clang::ASTNodeImporter::VisitAdjustedType(const AdjustedType *T) { + Error Err = Error::success(); + QualType ToOriginalType = importChecked(Err, T->getOriginalType()); + QualType ToAdjustedType = importChecked(Err, T->getAdjustedType()); + if (Err) + return std::move(Err); + + return Importer.getToContext().getAdjustedType(ToOriginalType, + ToAdjustedType); +} + +ExpectedType clang::ASTNodeImporter::VisitBitIntType(const BitIntType *T) { + return Importer.getToContext().getBitIntType(T->isUnsigned(), + T->getNumBits()); +} + +ExpectedType clang::ASTNodeImporter::VisitBTFTagAttributedType( + const clang::BTFTagAttributedType *T) { + Error Err = Error::success(); + const BTFTypeTagAttr *ToBTFAttr = importChecked(Err, T->getAttr()); + QualType ToWrappedType = importChecked(Err, T->getWrappedType()); + if (Err) + return std::move(Err); + + return Importer.getToContext().getBTFTagAttributedType(ToBTFAttr, + ToWrappedType); +} + +ExpectedType clang::ASTNodeImporter::VisitConstantMatrixType( + const clang::ConstantMatrixType *T) { + ExpectedType ToElementTypeOrErr = 
import(T->getElementType()); + if (!ToElementTypeOrErr) + return ToElementTypeOrErr.takeError(); + + return Importer.getToContext().getConstantMatrixType( + *ToElementTypeOrErr, T->getNumRows(), T->getNumColumns()); +} + +ExpectedType clang::ASTNodeImporter::VisitDependentAddressSpaceType( + const clang::DependentAddressSpaceType *T) { + Error Err = Error::success(); + QualType ToPointeeType = importChecked(Err, T->getPointeeType()); + Expr *ToAddrSpaceExpr = importChecked(Err, T->getAddrSpaceExpr()); + SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc()); + if (Err) + return std::move(Err); + + return Importer.getToContext().getDependentAddressSpaceType( + ToPointeeType, ToAddrSpaceExpr, ToAttrLoc); +} + +ExpectedType clang::ASTNodeImporter::VisitDependentBitIntType( + const clang::DependentBitIntType *T) { + ExpectedExpr ToNumBitsExprOrErr = import(T->getNumBitsExpr()); + if (!ToNumBitsExprOrErr) + return ToNumBitsExprOrErr.takeError(); + return Importer.getToContext().getDependentBitIntType(T->isUnsigned(), + *ToNumBitsExprOrErr); +} + +ExpectedType clang::ASTNodeImporter::VisitDependentSizedMatrixType( + const clang::DependentSizedMatrixType *T) { + Error Err = Error::success(); + QualType ToElementType = importChecked(Err, T->getElementType()); + Expr *ToRowExpr = importChecked(Err, T->getRowExpr()); + Expr *ToColumnExpr = importChecked(Err, T->getColumnExpr()); + SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc()); + if (Err) + return std::move(Err); + + return Importer.getToContext().getDependentSizedMatrixType( + ToElementType, ToRowExpr, ToColumnExpr, ToAttrLoc); +} + +ExpectedType clang::ASTNodeImporter::VisitDependentVectorType( + const clang::DependentVectorType *T) { + Error Err = Error::success(); + QualType ToElementType = importChecked(Err, T->getElementType()); + Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr()); + SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc()); + if (Err) + return std::move(Err); + + return Importer.getToContext().getDependentVectorType( + ToElementType, ToSizeExpr, ToAttrLoc, T->getVectorKind()); +} + +ExpectedType clang::ASTNodeImporter::VisitObjCTypeParamType( + const clang::ObjCTypeParamType *T) { + Expected<ObjCTypeParamDecl *> ToDeclOrErr = import(T->getDecl()); + if (!ToDeclOrErr) + return ToDeclOrErr.takeError(); + + SmallVector<ObjCProtocolDecl *, 4> ToProtocols; + for (ObjCProtocolDecl *FromProtocol : T->getProtocols()) { + Expected<ObjCProtocolDecl *> ToProtocolOrErr = import(FromProtocol); + if (!ToProtocolOrErr) + return ToProtocolOrErr.takeError(); + ToProtocols.push_back(*ToProtocolOrErr); + } + + return Importer.getToContext().getObjCTypeParamType(*ToDeclOrErr, + ToProtocols); +} + +ExpectedType clang::ASTNodeImporter::VisitPipeType(const clang::PipeType *T) { + ExpectedType ToElementTypeOrErr = import(T->getElementType()); + if (!ToElementTypeOrErr) + return ToElementTypeOrErr.takeError(); + + ASTContext &ToCtx = Importer.getToContext(); + if (T->isReadOnly()) + return ToCtx.getReadPipeType(*ToElementTypeOrErr); + else + return ToCtx.getWritePipeType(*ToElementTypeOrErr); +} + //---------------------------------------------------------------------------- // Import Declarations //---------------------------------------------------------------------------- @@ -1655,7 +1898,7 @@ Error ASTNodeImporter::ImportDeclParts( if (RT && RT->getDecl() == D) { Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node) << D->getDeclKindName(); - return 
make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } } } @@ -1788,60 +2031,35 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) { // because there is an ODR error with two typedefs. As another example, // the client may allow EnumConstantDecls with same names but with // different values in two distinct translation units. - bool AccumulateChildErrors = isa<TagDecl>(FromDC); + ChildErrorHandlingStrategy HandleChildErrors(FromDC); + + auto MightNeedReordering = [](const Decl *D) { + return isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<FriendDecl>(D); + }; + // Import everything that might need reordering first. Error ChildErrors = Error::success(); for (auto *From : FromDC->decls()) { + if (!MightNeedReordering(From)) + continue; + ExpectedDecl ImportedOrErr = import(From); // If we are in the process of ImportDefinition(...) for a RecordDecl we // want to make sure that we are also completing each FieldDecl. There // are currently cases where this does not happen and this is correctness // fix since operations such as code generation will expect this to be so. - if (ImportedOrErr) { - FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From); - Decl *ImportedDecl = *ImportedOrErr; - FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl); - if (FieldFrom && FieldTo) { - RecordDecl *FromRecordDecl = nullptr; - RecordDecl *ToRecordDecl = nullptr; - // If we have a field that is an ArrayType we need to check if the array - // element is a RecordDecl and if so we need to import the defintion. - if (FieldFrom->getType()->isArrayType()) { - // getBaseElementTypeUnsafe(...) handles multi-dimensonal arrays for us. - FromRecordDecl = FieldFrom->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl(); - ToRecordDecl = FieldTo->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl(); - } - - if (!FromRecordDecl || !ToRecordDecl) { - const RecordType *RecordFrom = - FieldFrom->getType()->getAs<RecordType>(); - const RecordType *RecordTo = FieldTo->getType()->getAs<RecordType>(); - - if (RecordFrom && RecordTo) { - FromRecordDecl = RecordFrom->getDecl(); - ToRecordDecl = RecordTo->getDecl(); - } - } - - if (FromRecordDecl && ToRecordDecl) { - if (FromRecordDecl->isCompleteDefinition() && - !ToRecordDecl->isCompleteDefinition()) { - Error Err = ImportDefinition(FromRecordDecl, ToRecordDecl); - - if (Err && AccumulateChildErrors) - ChildErrors = joinErrors(std::move(ChildErrors), std::move(Err)); - else - consumeError(std::move(Err)); - } - } - } - } else { - if (AccumulateChildErrors) - ChildErrors = - joinErrors(std::move(ChildErrors), ImportedOrErr.takeError()); - else - consumeError(ImportedOrErr.takeError()); + if (!ImportedOrErr) { + HandleChildErrors.handleChildImportResult(ChildErrors, + ImportedOrErr.takeError()); + continue; + } + FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From); + Decl *ImportedDecl = *ImportedOrErr; + FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl); + if (FieldFrom && FieldTo) { + Error Err = ImportFieldDeclDefinition(FieldFrom, FieldTo); + HandleChildErrors.handleChildImportResult(ChildErrors, std::move(Err)); } } @@ -1856,7 +2074,7 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) { // During the import of `a` we import first the dependencies in sequence, // thus the order would be `c`, `b`, `a`. 
We will get the normal order by // first removing the already imported members and then adding them in the - // order as they apper in the "from" context. + // order as they appear in the "from" context. // // Keeping field order is vital because it determines structure layout. // @@ -1868,9 +2086,6 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) { // interface in LLDB is implemented by the means of the ASTImporter. However, // calling an import at this point would result in an uncontrolled import, we // must avoid that. - const auto *FromRD = dyn_cast<RecordDecl>(FromDC); - if (!FromRD) - return ChildErrors; auto ToDCOrErr = Importer.ImportContext(FromDC); if (!ToDCOrErr) { @@ -1878,15 +2093,17 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) { return ToDCOrErr.takeError(); } - DeclContext *ToDC = *ToDCOrErr; - // Remove all declarations, which may be in wrong order in the - // lexical DeclContext and then add them in the proper order. - for (auto *D : FromRD->decls()) { - if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<FriendDecl>(D)) { + if (const auto *FromRD = dyn_cast<RecordDecl>(FromDC)) { + DeclContext *ToDC = *ToDCOrErr; + // Remove all declarations, which may be in wrong order in the + // lexical DeclContext and then add them in the proper order. + for (auto *D : FromRD->decls()) { + if (!MightNeedReordering(D)) + continue; + assert(D && "DC contains a null decl"); - Decl *ToD = Importer.GetAlreadyImportedOrNull(D); - // Remove only the decls which we successfully imported. - if (ToD) { + if (Decl *ToD = Importer.GetAlreadyImportedOrNull(D)) { + // Remove only the decls which we successfully imported. assert(ToDC == ToD->getLexicalDeclContext() && ToDC->containsDecl(ToD)); // Remove the decl from its wrong place in the linked list. ToDC->removeDecl(ToD); @@ -1898,9 +2115,53 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) { } } + // Import everything else. + for (auto *From : FromDC->decls()) { + if (MightNeedReordering(From)) + continue; + + ExpectedDecl ImportedOrErr = import(From); + if (!ImportedOrErr) + HandleChildErrors.handleChildImportResult(ChildErrors, + ImportedOrErr.takeError()); + } + return ChildErrors; } +Error ASTNodeImporter::ImportFieldDeclDefinition(const FieldDecl *From, + const FieldDecl *To) { + RecordDecl *FromRecordDecl = nullptr; + RecordDecl *ToRecordDecl = nullptr; + // If we have a field that is an ArrayType we need to check if the array + // element is a RecordDecl and if so we need to import the definition. + QualType FromType = From->getType(); + QualType ToType = To->getType(); + if (FromType->isArrayType()) { + // getBaseElementTypeUnsafe(...) handles multi-dimensonal arrays for us. 
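The note above that field order determines structure layout can be checked with a small standalone program (the struct names are invented for illustration, not taken from this patch): the same members in a different order get different offsets and, through padding, usually a different total size.

    #include <cstddef>
    #include <cstdio>

    // Same members, different declaration order.
    struct DeclaredOrder { char tag; int value; char flag; };  // commonly 12 bytes
    struct Reordered     { int value; char tag; char flag; };  // commonly 8 bytes

    int main() {
      std::printf("value offset: %zu vs %zu\n",
                  offsetof(DeclaredOrder, value), offsetof(Reordered, value));
      std::printf("sizeof: %zu vs %zu\n",
                  sizeof(DeclaredOrder), sizeof(Reordered));
    }

The exact numbers are implementation-defined, but the point the comment makes holds on any common ABI: reordering fields changes the layout, so the importer must preserve the original order.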
+ FromRecordDecl = FromType->getBaseElementTypeUnsafe()->getAsRecordDecl(); + ToRecordDecl = ToType->getBaseElementTypeUnsafe()->getAsRecordDecl(); + } + + if (!FromRecordDecl || !ToRecordDecl) { + const RecordType *RecordFrom = FromType->getAs<RecordType>(); + const RecordType *RecordTo = ToType->getAs<RecordType>(); + + if (RecordFrom && RecordTo) { + FromRecordDecl = RecordFrom->getDecl(); + ToRecordDecl = RecordTo->getDecl(); + } + } + + if (FromRecordDecl && ToRecordDecl) { + if (FromRecordDecl->isCompleteDefinition() && + !ToRecordDecl->isCompleteDefinition()) + return ImportDefinition(FromRecordDecl, ToRecordDecl); + } + + return Error::success(); +} + Error ASTNodeImporter::ImportDeclContext( Decl *FromD, DeclContext *&ToDC, DeclContext *&ToLexicalDC) { auto ToDCOrErr = Importer.ImportContext(FromD->getDeclContext()); @@ -1991,6 +2252,14 @@ Error ASTNodeImporter::ImportDefinition( } To->startDefinition(); + // Set the definition to complete even if it is really not complete during + // import. Some AST constructs (expressions) require the record layout + // to be calculated (see 'clang::computeDependence') at the time they are + // constructed. Import of such AST node is possible during import of the + // same record, there is no way to have a completely defined record (all + // fields imported) at that time without multiple AST import passes. + if (!Importer.isMinimalImport()) + To->setCompleteDefinition(true); // Complete the definition even if error is returned. // The RecordDecl may be already part of the AST so it is better to // have it in complete state even if something is wrong with it. @@ -2055,9 +2324,10 @@ Error ASTNodeImporter::ImportDefinition( ToCXX->setBases(Bases.data(), Bases.size()); } - if (shouldForceImportDeclContext(Kind)) + if (shouldForceImportDeclContext(Kind)) { if (Error Err = ImportDeclContext(From, /*ForceImport=*/true)) return Err; + } return Error::success(); } @@ -2121,10 +2391,10 @@ Error ASTNodeImporter::ImportDefinition( } Error ASTNodeImporter::ImportTemplateArguments( - const TemplateArgument *FromArgs, unsigned NumFromArgs, + ArrayRef<TemplateArgument> FromArgs, SmallVectorImpl<TemplateArgument> &ToArgs) { - for (unsigned I = 0; I != NumFromArgs; ++I) { - if (auto ToOrErr = import(FromArgs[I])) + for (const auto &Arg : FromArgs) { + if (auto ToOrErr = import(Arg)) ToArgs.push_back(*ToOrErr); else return ToOrErr.takeError(); @@ -2157,110 +2427,33 @@ getStructuralEquivalenceKind(const ASTImporter &Importer) { : StructuralEquivalenceKind::Default; } -bool ASTNodeImporter::IsStructuralMatch(Decl *From, Decl *To, bool Complain) { - StructuralEquivalenceContext Ctx( - Importer.getFromContext(), Importer.getToContext(), - Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), - false, Complain); - return Ctx.IsEquivalent(From, To); -} - -bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord, - RecordDecl *ToRecord, bool Complain) { +bool ASTNodeImporter::IsStructuralMatch(Decl *From, Decl *To, bool Complain, + bool IgnoreTemplateParmDepth) { // Eliminate a potential failure point where we attempt to re-import // something we're trying to import while completing ToRecord. 
- Decl *ToOrigin = Importer.GetOriginalDecl(ToRecord); + Decl *ToOrigin = Importer.GetOriginalDecl(To); if (ToOrigin) { - auto *ToOriginRecord = dyn_cast<RecordDecl>(ToOrigin); - if (ToOriginRecord) - ToRecord = ToOriginRecord; + To = ToOrigin; } - StructuralEquivalenceContext Ctx(Importer.getFromContext(), - ToRecord->getASTContext(), - Importer.getNonEquivalentDecls(), - getStructuralEquivalenceKind(Importer), - false, Complain); - return Ctx.IsEquivalent(FromRecord, ToRecord); -} - -bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar, - bool Complain) { - StructuralEquivalenceContext Ctx( - Importer.getFromContext(), Importer.getToContext(), - Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), - false, Complain); - return Ctx.IsEquivalent(FromVar, ToVar); -} - -bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) { - // Eliminate a potential failure point where we attempt to re-import - // something we're trying to import while completing ToEnum. - if (Decl *ToOrigin = Importer.GetOriginalDecl(ToEnum)) - if (auto *ToOriginEnum = dyn_cast<EnumDecl>(ToOrigin)) - ToEnum = ToOriginEnum; - - StructuralEquivalenceContext Ctx( - Importer.getFromContext(), Importer.getToContext(), - Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer)); - return Ctx.IsEquivalent(FromEnum, ToEnum); -} - -bool ASTNodeImporter::IsStructuralMatch(FunctionTemplateDecl *From, - FunctionTemplateDecl *To) { StructuralEquivalenceContext Ctx( Importer.getFromContext(), Importer.getToContext(), Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), - false, false); - return Ctx.IsEquivalent(From, To); -} - -bool ASTNodeImporter::IsStructuralMatch(FunctionDecl *From, FunctionDecl *To) { - StructuralEquivalenceContext Ctx( - Importer.getFromContext(), Importer.getToContext(), - Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer), - false, false); - return Ctx.IsEquivalent(From, To); -} - -bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC, - EnumConstantDecl *ToEC) { - const llvm::APSInt &FromVal = FromEC->getInitVal(); - const llvm::APSInt &ToVal = ToEC->getInitVal(); - - return FromVal.isSigned() == ToVal.isSigned() && - FromVal.getBitWidth() == ToVal.getBitWidth() && - FromVal == ToVal; -} - -bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From, - ClassTemplateDecl *To) { - StructuralEquivalenceContext Ctx(Importer.getFromContext(), - Importer.getToContext(), - Importer.getNonEquivalentDecls(), - getStructuralEquivalenceKind(Importer)); - return Ctx.IsEquivalent(From, To); -} - -bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From, - VarTemplateDecl *To) { - StructuralEquivalenceContext Ctx(Importer.getFromContext(), - Importer.getToContext(), - Importer.getNonEquivalentDecls(), - getStructuralEquivalenceKind(Importer)); + /*StrictTypeSpelling=*/false, Complain, /*ErrorOnTagTypeMismatch=*/false, + IgnoreTemplateParmDepth); return Ctx.IsEquivalent(From, To); } ExpectedDecl ASTNodeImporter::VisitDecl(Decl *D) { Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node) << D->getDeclKindName(); - return make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } ExpectedDecl ASTNodeImporter::VisitImportDecl(ImportDecl *D) { Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node) << D->getDeclKindName(); - return 
make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } ExpectedDecl ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) { @@ -2431,10 +2624,10 @@ ExpectedDecl ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) { // Create the "to" namespace, if needed. NamespaceDecl *ToNamespace = MergeWithNamespace; if (!ToNamespace) { - if (GetImportedOrCreateDecl( - ToNamespace, D, Importer.getToContext(), DC, D->isInline(), - *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(), - /*PrevDecl=*/nullptr)) + if (GetImportedOrCreateDecl(ToNamespace, D, Importer.getToContext(), DC, + D->isInline(), *BeginLocOrErr, Loc, + Name.getAsIdentifierInfo(), + /*PrevDecl=*/nullptr, D->isNested())) return ToNamespace; ToNamespace->setRBraceLoc(*RBraceLocOrErr); ToNamespace->setLexicalDeclContext(LexicalDC); @@ -2532,6 +2725,22 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) { QualType FromUT = D->getUnderlyingType(); QualType FoundUT = FoundTypedef->getUnderlyingType(); if (Importer.IsStructurallyEquivalent(FromUT, FoundUT)) { + // If the underlying declarations are unnamed records these can be + // imported as different types. We should create a distinct typedef + // node in this case. + // If we found an existing underlying type with a record in a + // different context (than the imported), this is already reason for + // having distinct typedef nodes for these. + // Again this can create situation like + // 'typedef int T; typedef int T;' but this is hard to avoid without + // a rename strategy at import. + if (!FromUT.isNull() && !FoundUT.isNull()) { + RecordDecl *FromR = FromUT->getAsRecordDecl(); + RecordDecl *FoundR = FoundUT->getAsRecordDecl(); + if (FromR && FoundR && + !hasSameVisibilityContextAndLinkage(FoundR, FromR)) + continue; + } // If the "From" context has a complete underlying type but we // already have a complete underlying type then return with that. if (!FromUT->isIncompleteType() && !FoundUT->isIncompleteType()) @@ -2623,9 +2832,11 @@ ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) { for (auto *FoundDecl : FoundDecls) { if (!FoundDecl->isInIdentifierNamespace(IDNS)) continue; - if (auto *FoundAlias = dyn_cast<TypeAliasTemplateDecl>(FoundDecl)) - return Importer.MapImported(D, FoundAlias); - ConflictingDecls.push_back(FoundDecl); + if (auto *FoundAlias = dyn_cast<TypeAliasTemplateDecl>(FoundDecl)) { + if (IsStructuralMatch(D, FoundAlias)) + return Importer.MapImported(D, FoundAlias); + ConflictingDecls.push_back(FoundDecl); + } } if (!ConflictingDecls.empty()) { @@ -2722,7 +2933,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) { // We may already have an enum of the same name; try to find and match it. 
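The "typedef int T; typedef int T;" situation mentioned in the typedef-import comment above is harmless in practice because such redeclarations are well-formed C++; a minimal illustration (not code from this diff):

    // Repeating a typedef or alias-declaration for the same type is allowed.
    typedef int T;
    typedef int T;      // redeclaration of T as the same type: valid

    using U = int;
    using U = int;      // alias-declarations may likewise be repeated

    int main() { T a = 0; U b = a; return b; }

So creating a second, structurally identical typedef node during import produces redundant but valid declarations rather than an error.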
EnumDecl *PrevDecl = nullptr; - if (!DC->isFunctionOrMethod() && SearchName) { + if (!DC->isFunctionOrMethod()) { SmallVector<NamedDecl *, 4> ConflictingDecls; auto FoundDecls = Importer.findDeclsInToCtx(DC, SearchName); @@ -2738,7 +2949,7 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) { if (auto *FoundEnum = dyn_cast<EnumDecl>(FoundDecl)) { if (!hasSameVisibilityContextAndLinkage(FoundEnum, D)) continue; - if (IsStructuralMatch(D, FoundEnum)) { + if (IsStructuralMatch(D, FoundEnum, !SearchName.isEmpty())) { EnumDecl *FoundDef = FoundEnum->getDefinition(); if (D->isThisDeclarationADefinition() && FoundDef) return Importer.MapImported(D, FoundDef); @@ -2749,7 +2960,12 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) { } } - if (!ConflictingDecls.empty()) { + // In case of unnamed enums, we try to find an existing similar one, if none + // was found, perform the import always. + // Structural in-equivalence is not detected in this way here, but it may + // be found when the parent decl is imported (if the enum is part of a + // class). To make this totally exact a more difficult solution is needed. + if (SearchName && !ConflictingDecls.empty()) { ExpectedName NameOrErr = Importer.HandleNameConflict( SearchName, DC, IDNS, ConflictingDecls.data(), ConflictingDecls.size()); @@ -2834,9 +3050,13 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { } else if (Importer.getToContext().getLangOpts().CPlusPlus) IDNS |= Decl::IDNS_Ordinary | Decl::IDNS_TagFriend; + bool IsDependentContext = DC != LexicalDC ? LexicalDC->isDependentContext() + : DC->isDependentContext(); + bool DependentFriend = IsFriendTemplate && IsDependentContext; + // We may already have a record of the same name; try to find and match it. RecordDecl *PrevDecl = nullptr; - if (!DC->isFunctionOrMethod() && !D->isLambda()) { + if (!DependentFriend && !DC->isFunctionOrMethod() && !D->isLambda()) { SmallVector<NamedDecl *, 4> ConflictingDecls; auto FoundDecls = Importer.findDeclsInToCtx(DC, SearchName); @@ -2923,16 +3143,15 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { return TInfoOrErr.takeError(); if (GetImportedOrCreateSpecialDecl( D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(), - DC, *TInfoOrErr, Loc, DCXX->isDependentLambda(), + DC, *TInfoOrErr, Loc, DCXX->getLambdaDependencyKind(), DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault())) return D2CXX; - ExpectedDecl CDeclOrErr = import(DCXX->getLambdaContextDecl()); + CXXRecordDecl::LambdaNumbering Numbering = DCXX->getLambdaNumbering(); + ExpectedDecl CDeclOrErr = import(Numbering.ContextDecl); if (!CDeclOrErr) return CDeclOrErr.takeError(); - D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr, - DCXX->hasKnownLambdaInternalLinkage()); - D2CXX->setDeviceLambdaManglingNumber( - DCXX->getDeviceLambdaManglingNumber()); + Numbering.ContextDecl = *CDeclOrErr; + D2CXX->setLambdaNumbering(Numbering); } else if (DCXX->isInjectedClassName()) { // We have to be careful to do a similar dance to the one in // Sema::ActOnStartCXXMemberDeclarations @@ -2968,8 +3187,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { // InjectedClassNameType (see Sema::CheckClassTemplate). Update the // previously set type to the correct value here (ToDescribed is not // available at record create). - // FIXME: The previous type is cleared but not removed from - // ASTContext's internal storage. 
CXXRecordDecl *Injected = nullptr; for (NamedDecl *Found : D2CXX->noload_lookup(Name)) { auto *Record = dyn_cast<CXXRecordDecl>(Found); @@ -2979,20 +3196,34 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { } } // Create an injected type for the whole redecl chain. + // The chain may contain an already existing injected type at the start, + // if yes this should be reused. We must ensure that only one type + // object exists for the injected type (including the injected record + // declaration), ASTContext does not check it. SmallVector<Decl *, 2> Redecls = getCanonicalForwardRedeclChain(D2CXX); + const Type *FrontTy = + cast<CXXRecordDecl>(Redecls.front())->getTypeForDecl(); + QualType InjSpec; + if (auto *InjTy = FrontTy->getAs<InjectedClassNameType>()) + InjSpec = InjTy->getInjectedSpecializationType(); + else + InjSpec = ToDescribed->getInjectedClassNameSpecialization(); for (auto *R : Redecls) { auto *RI = cast<CXXRecordDecl>(R); - RI->setTypeForDecl(nullptr); - // Below we create a new injected type and assign that to the - // canonical decl, subsequent declarations in the chain will reuse - // that type. - Importer.getToContext().getInjectedClassNameType( - RI, ToDescribed->getInjectedClassNameSpecialization()); + if (R != Redecls.front() || + !isa<InjectedClassNameType>(RI->getTypeForDecl())) + RI->setTypeForDecl(nullptr); + // This function tries to get the injected type from getTypeForDecl, + // then from the previous declaration if possible. If not, it creates + // a new type. + Importer.getToContext().getInjectedClassNameType(RI, InjSpec); } - // Set the new type for the previous injected decl too. + // Set the new type for the injected decl too. if (Injected) { Injected->setTypeForDecl(nullptr); + // This function will copy the injected type from D2CXX into Injected. + // The injected decl does not have a previous decl to copy from. 
Importer.getToContext().getTypeDeclType(Injected, D2CXX); } } @@ -3101,8 +3332,9 @@ ExpectedDecl ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) { return ToEnumerator; } -Error ASTNodeImporter::ImportTemplateParameterLists(const DeclaratorDecl *FromD, - DeclaratorDecl *ToD) { +template <typename DeclTy> +Error ASTNodeImporter::ImportTemplateParameterLists(const DeclTy *FromD, + DeclTy *ToD) { unsigned int Num = FromD->getNumTemplateParameterLists(); if (Num == 0) return Error::success(); @@ -3124,6 +3356,11 @@ Error ASTNodeImporter::ImportTemplateInformation( case FunctionDecl::TK_FunctionTemplate: return Error::success(); + case FunctionDecl::TK_DependentNonTemplate: + if (Expected<FunctionDecl *> InstFDOrErr = + import(FromFD->getInstantiatedFromDecl())) + ToFD->setInstantiatedFromDecl(*InstFDOrErr); + return Error::success(); case FunctionDecl::TK_MemberSpecialization: { TemplateSpecializationKind TSK = FromFD->getTemplateSpecializationKind(); @@ -3175,27 +3412,25 @@ Error ASTNodeImporter::ImportTemplateInformation( case FunctionDecl::TK_DependentFunctionTemplateSpecialization: { auto *FromInfo = FromFD->getDependentSpecializationInfo(); - UnresolvedSet<8> TemplDecls; - unsigned NumTemplates = FromInfo->getNumTemplates(); - for (unsigned I = 0; I < NumTemplates; I++) { - if (Expected<FunctionTemplateDecl *> ToFTDOrErr = - import(FromInfo->getTemplate(I))) - TemplDecls.addDecl(*ToFTDOrErr); + UnresolvedSet<8> Candidates; + for (FunctionTemplateDecl *FTD : FromInfo->getCandidates()) { + if (Expected<FunctionTemplateDecl *> ToFTDOrErr = import(FTD)) + Candidates.addDecl(*ToFTDOrErr); else return ToFTDOrErr.takeError(); } // Import TemplateArgumentListInfo. TemplateArgumentListInfo ToTAInfo; - if (Error Err = ImportTemplateArgumentListInfo( - FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(), - llvm::makeArrayRef( - FromInfo->getTemplateArgs(), FromInfo->getNumTemplateArgs()), - ToTAInfo)) - return Err; + const auto *FromTAArgsAsWritten = FromInfo->TemplateArgumentsAsWritten; + if (FromTAArgsAsWritten) + if (Error Err = + ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ToTAInfo)) + return Err; - ToFD->setDependentTemplateSpecialization(Importer.getToContext(), - TemplDecls, ToTAInfo); + ToFD->setDependentTemplateSpecialization( + Importer.getToContext(), Candidates, + FromTAArgsAsWritten ? &ToTAInfo : nullptr); return Error::success(); } } @@ -3229,9 +3464,12 @@ Error ASTNodeImporter::ImportFunctionDeclBody(FunctionDecl *FromFD, } // Returns true if the given D has a DeclContext up to the TranslationUnitDecl -// which is equal to the given DC. +// which is equal to the given DC, or D is equal to DC. static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) { - const DeclContext *DCi = D->getDeclContext(); + const DeclContext *DCi = dyn_cast<DeclContext>(D); + if (!DCi) + DCi = D->getDeclContext(); + assert(DCi && "Declaration should have a context"); while (DCi != D->getTranslationUnitDecl()) { if (DCi == DC) return true; @@ -3240,31 +3478,213 @@ static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) { return false; } -bool ASTNodeImporter::hasAutoReturnTypeDeclaredInside(FunctionDecl *D) { - QualType FromTy = D->getType(); - const FunctionProtoType *FromFPT = FromTy->getAs<FunctionProtoType>(); - assert(FromFPT && "Must be called on FunctionProtoType"); - if (AutoType *AutoT = FromFPT->getReturnType()->getContainedAutoType()) { - QualType DeducedT = AutoT->getDeducedType(); - if (const RecordType *RecordT = - DeducedT.isNull() ? 
nullptr : dyn_cast<RecordType>(DeducedT)) { - RecordDecl *RD = RecordT->getDecl(); - assert(RD); - if (isAncestorDeclContextOf(D, RD)) { - assert(RD->getLexicalDeclContext() == RD->getDeclContext()); - return true; - } +// Check if there is a declaration that has 'DC' as parent context and is +// referenced from statement 'S' or one of its children. The search is done in +// BFS order through children of 'S'. +static bool isAncestorDeclContextOf(const DeclContext *DC, const Stmt *S) { + SmallVector<const Stmt *> ToProcess; + ToProcess.push_back(S); + while (!ToProcess.empty()) { + const Stmt *CurrentS = ToProcess.pop_back_val(); + ToProcess.append(CurrentS->child_begin(), CurrentS->child_end()); + if (const auto *DeclRef = dyn_cast<DeclRefExpr>(CurrentS)) { + if (const Decl *D = DeclRef->getDecl()) + if (isAncestorDeclContextOf(DC, D)) + return true; + } else if (const auto *E = + dyn_cast_or_null<SubstNonTypeTemplateParmExpr>(CurrentS)) { + if (const Decl *D = E->getAssociatedDecl()) + if (isAncestorDeclContextOf(DC, D)) + return true; + } + } + return false; +} + +namespace { +/// Check if a type has any reference to a declaration that is inside the body +/// of a function. +/// The \c CheckType(QualType) function should be used to determine +/// this property. +/// +/// The type visitor visits one type object only (not recursive). +/// To find all referenced declarations we must discover all type objects until +/// the canonical type is reached (walk over typedef and similar objects). This +/// is done by loop over all "sugar" type objects. For every such type we must +/// check all declarations that are referenced from it. For this check the +/// visitor is used. In the visit functions all referenced declarations except +/// the one that follows in the sugar chain (if any) must be checked. For this +/// check the same visitor is re-used (it has no state-dependent data). +/// +/// The visit functions have 3 possible return values: +/// - True, found a declaration inside \c ParentDC. +/// - False, found declarations only outside \c ParentDC and it is not possible +/// to find more declarations (the "sugar" chain does not continue). +/// - Empty optional value, found no declarations or only outside \c ParentDC, +/// but it is possible to find more declarations in the type "sugar" chain. +/// The loop over the "sugar" types can be implemented by using type visit +/// functions only (call \c CheckType with the desugared type). With the current +/// solution no visit function is needed if the type has only a desugared type +/// as data. +class IsTypeDeclaredInsideVisitor + : public TypeVisitor<IsTypeDeclaredInsideVisitor, std::optional<bool>> { +public: + IsTypeDeclaredInsideVisitor(const FunctionDecl *ParentDC) + : ParentDC(ParentDC) {} + + bool CheckType(QualType T) { + // Check the chain of "sugar" types. + // The "sugar" types are typedef or similar types that have the same + // canonical type. 
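The "sugar" chain that this visitor walks is visible in ordinary source as well: each alias layer is a distinct type node, but single-step desugaring eventually reaches one shared canonical type. A tiny sketch with invented names:

    #include <type_traits>

    struct Canonical {};
    using  Step1 = Canonical;   // sugar over Canonical
    typedef Step1 Step2;        // sugar over Step1
    using  Step3 = Step2;       // sugar over Step2

    // Every alias desugars, step by step, to the same canonical type.
    static_assert(std::is_same<Step3, Canonical>::value,
                  "all aliases share one canonical type");

Each of Step1..Step3 is a separate node the visitor must inspect before giving up, which is why CheckType loops over getSingleStepDesugaredType until the type stops changing.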
+ if (std::optional<bool> Res = Visit(T.getTypePtr())) + return *Res; + QualType DsT = + T.getSingleStepDesugaredType(ParentDC->getParentASTContext()); + while (DsT != T) { + if (std::optional<bool> Res = Visit(DsT.getTypePtr())) + return *Res; + T = DsT; + DsT = T.getSingleStepDesugaredType(ParentDC->getParentASTContext()); } + return false; + } + + std::optional<bool> VisitTagType(const TagType *T) { + if (auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl())) + for (const auto &Arg : Spec->getTemplateArgs().asArray()) + if (checkTemplateArgument(Arg)) + return true; + return isAncestorDeclContextOf(ParentDC, T->getDecl()); } - if (const TypedefType *TypedefT = - dyn_cast<TypedefType>(FromFPT->getReturnType())) { - TypedefNameDecl *TD = TypedefT->getDecl(); + + std::optional<bool> VisitPointerType(const PointerType *T) { + return CheckType(T->getPointeeType()); + } + + std::optional<bool> VisitReferenceType(const ReferenceType *T) { + return CheckType(T->getPointeeTypeAsWritten()); + } + + std::optional<bool> VisitTypedefType(const TypedefType *T) { + const TypedefNameDecl *TD = T->getDecl(); assert(TD); - if (isAncestorDeclContextOf(D, TD)) { - assert(TD->getLexicalDeclContext() == TD->getDeclContext()); + return isAncestorDeclContextOf(ParentDC, TD); + } + + std::optional<bool> VisitUsingType(const UsingType *T) { + if (T->getFoundDecl() && + isAncestorDeclContextOf(ParentDC, T->getFoundDecl())) return true; + + return {}; + } + + std::optional<bool> + VisitTemplateSpecializationType(const TemplateSpecializationType *T) { + for (const auto &Arg : T->template_arguments()) + if (checkTemplateArgument(Arg)) + return true; + // This type is a "sugar" to a record type, it can have a desugared type. + return {}; + } + + std::optional<bool> + VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) { + // The "associated declaration" can be the same as ParentDC. + if (isAncestorDeclContextOf(ParentDC, T->getAssociatedDecl())) + return true; + return {}; + } + + std::optional<bool> VisitConstantArrayType(const ConstantArrayType *T) { + if (T->getSizeExpr() && isAncestorDeclContextOf(ParentDC, T->getSizeExpr())) + return true; + + return CheckType(T->getElementType()); + } + + std::optional<bool> VisitVariableArrayType(const VariableArrayType *T) { + llvm_unreachable( + "Variable array should not occur in deduced return type of a function"); + } + + std::optional<bool> VisitIncompleteArrayType(const IncompleteArrayType *T) { + llvm_unreachable("Incomplete array should not occur in deduced return type " + "of a function"); + } + + std::optional<bool> VisitDependentArrayType(const IncompleteArrayType *T) { + llvm_unreachable("Dependent array should not occur in deduced return type " + "of a function"); + } + +private: + const DeclContext *const ParentDC; + + bool checkTemplateArgument(const TemplateArgument &Arg) { + switch (Arg.getKind()) { + case TemplateArgument::Null: + return false; + case TemplateArgument::Integral: + return CheckType(Arg.getIntegralType()); + case TemplateArgument::Type: + return CheckType(Arg.getAsType()); + case TemplateArgument::Expression: + return isAncestorDeclContextOf(ParentDC, Arg.getAsExpr()); + case TemplateArgument::Declaration: + // FIXME: The declaration in this case is not allowed to be in a function? + return isAncestorDeclContextOf(ParentDC, Arg.getAsDecl()); + case TemplateArgument::NullPtr: + // FIXME: The type is not allowed to be in the function? 
+ return CheckType(Arg.getNullPtrType()); + case TemplateArgument::StructuralValue: + return CheckType(Arg.getStructuralValueType()); + case TemplateArgument::Pack: + for (const auto &PackArg : Arg.getPackAsArray()) + if (checkTemplateArgument(PackArg)) + return true; + return false; + case TemplateArgument::Template: + // Templates can not be defined locally in functions. + // A template passed as argument can be not in ParentDC. + return false; + case TemplateArgument::TemplateExpansion: + // Templates can not be defined locally in functions. + // A template passed as argument can be not in ParentDC. + return false; } + llvm_unreachable("Unknown TemplateArgument::ArgKind enum"); + }; +}; +} // namespace + +/// This function checks if the given function has a return type that contains +/// a reference (in any way) to a declaration inside the same function. +bool ASTNodeImporter::hasReturnTypeDeclaredInside(FunctionDecl *D) { + QualType FromTy = D->getType(); + const auto *FromFPT = FromTy->getAs<FunctionProtoType>(); + assert(FromFPT && "Must be called on FunctionProtoType"); + + auto IsCXX11LambdaWithouTrailingReturn = [&]() { + if (Importer.FromContext.getLangOpts().CPlusPlus14) // C++14 or later + return false; + + if (FromFPT->hasTrailingReturn()) + return false; + + if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) + return cast<CXXRecordDecl>(MD->getDeclContext())->isLambda(); + + return false; + }; + + QualType RetT = FromFPT->getReturnType(); + if (isa<AutoType>(RetT.getTypePtr()) || IsCXX11LambdaWithouTrailingReturn()) { + FunctionDecl *Def = D->getDefinition(); + IsTypeDeclaredInsideVisitor Visitor(Def ? Def : D); + return Visitor.CheckType(RetT); } + return false; } @@ -3399,11 +3819,14 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { return std::move(Err); QualType FromTy = D->getType(); + TypeSourceInfo *FromTSI = D->getTypeSourceInfo(); // Set to true if we do not import the type of the function as is. There are // cases when the original type would result in an infinite recursion during // the import. To avoid an infinite recursion when importing, we create the // FunctionDecl with a simplified function type and update it only after the // relevant AST nodes are already imported. + // The type is related to TypeSourceInfo (it references the type), so we must + // do the same with TypeSourceInfo. bool UsedDifferentProtoType = false; if (const auto *FromFPT = FromTy->getAs<FunctionProtoType>()) { QualType FromReturnTy = FromFPT->getReturnType(); @@ -3412,7 +3835,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { // E.g.: auto foo() { struct X{}; return X(); } // To avoid an infinite recursion when importing, create the FunctionDecl // with a simplified return type. 
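The kind of function this check guards against can be written out with ordinary C++14 return-type deduction; both functions below (names are illustrative) have a deduced return type that refers to a declaration local to the function body, which is exactly the recursion hazard described above:

    // Deduced return type naming a struct declared inside the function.
    auto makeLocal() {
      struct Local { int value = 0; };
      return Local{};                  // return type is makeLocal()::Local
    }

    // Deduced return type reaching a local declaration through local sugar.
    auto makeAliased() {
      struct Hidden { int v = 1; };
      using Alias = Hidden;
      return Alias{};
    }

    int useThem() { return makeLocal().value + makeAliased().v; }

Importing either function with its real return type would require importing the local struct, which in turn requires the enclosing function, hence the temporary void return type.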
- if (hasAutoReturnTypeDeclaredInside(D)) { + if (hasReturnTypeDeclaredInside(D)) { FromReturnTy = Importer.getFromContext().VoidTy; UsedDifferentProtoType = true; } @@ -3430,13 +3853,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { } FromTy = Importer.getFromContext().getFunctionType( FromReturnTy, FromFPT->getParamTypes(), FromEPI); + FromTSI = Importer.getFromContext().getTrivialTypeSourceInfo( + FromTy, D->getBeginLoc()); } Error Err = Error::success(); auto T = importChecked(Err, FromTy); - auto TInfo = importChecked(Err, D->getTypeSourceInfo()); + auto TInfo = importChecked(Err, FromTSI); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); auto ToEndLoc = importChecked(Err, D->getEndLoc()); + auto ToDefaultLoc = importChecked(Err, D->getDefaultLoc()); auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc()); auto TrailingRequiresClause = importChecked(Err, D->getTrailingRequiresClause()); @@ -3445,7 +3871,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { // Import the function parameters. SmallVector<ParmVarDecl *, 8> Parameters; - for (auto P : D->parameters()) { + for (auto *P : D->parameters()) { if (Expected<ParmVarDecl *> ToPOrErr = import(P)) Parameters.push_back(*ToPOrErr); else @@ -3459,13 +3885,19 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { importExplicitSpecifier(Err, FromConstructor->getExplicitSpecifier()); if (Err) return std::move(Err); + auto ToInheritedConstructor = InheritedConstructor(); + if (FromConstructor->isInheritingConstructor()) { + Expected<InheritedConstructor> ImportedInheritedCtor = + import(FromConstructor->getInheritedConstructor()); + if (!ImportedInheritedCtor) + return ImportedInheritedCtor.takeError(); + ToInheritedConstructor = *ImportedInheritedCtor; + } if (GetImportedOrCreateDecl<CXXConstructorDecl>( ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC), - ToInnerLocStart, NameInfo, T, TInfo, ESpec, D->isInlineSpecified(), - D->isImplicit(), D->getConstexprKind(), - InheritedConstructor(), // FIXME: Properly import inherited - // constructor info - TrailingRequiresClause)) + ToInnerLocStart, NameInfo, T, TInfo, ESpec, D->UsesFPIntrin(), + D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind(), + ToInheritedConstructor, TrailingRequiresClause)) return ToFunction; } else if (CXXDestructorDecl *FromDtor = dyn_cast<CXXDestructorDecl>(D)) { @@ -3477,9 +3909,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { return std::move(Err); if (GetImportedOrCreateDecl<CXXDestructorDecl>( - ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC), - ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(), - D->isImplicit(), D->getConstexprKind(), TrailingRequiresClause)) + ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC), + ToInnerLocStart, NameInfo, T, TInfo, D->UsesFPIntrin(), + D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind(), + TrailingRequiresClause)) return ToFunction; CXXDestructorDecl *ToDtor = cast<CXXDestructorDecl>(ToFunction); @@ -3493,15 +3926,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { return std::move(Err); if (GetImportedOrCreateDecl<CXXConversionDecl>( ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC), - ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(), ESpec, - D->getConstexprKind(), SourceLocation(), TrailingRequiresClause)) + ToInnerLocStart, NameInfo, T, TInfo, D->UsesFPIntrin(), + D->isInlineSpecified(), ESpec, 
D->getConstexprKind(), + SourceLocation(), TrailingRequiresClause)) return ToFunction; } else if (auto *Method = dyn_cast<CXXMethodDecl>(D)) { if (GetImportedOrCreateDecl<CXXMethodDecl>( ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC), ToInnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(), - Method->isInlineSpecified(), D->getConstexprKind(), - SourceLocation(), TrailingRequiresClause)) + Method->UsesFPIntrin(), Method->isInlineSpecified(), + D->getConstexprKind(), SourceLocation(), TrailingRequiresClause)) return ToFunction; } else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(D)) { ExplicitSpecifier ESpec = @@ -3515,13 +3949,13 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { NameInfo, T, TInfo, ToEndLoc, Ctor)) return ToFunction; cast<CXXDeductionGuideDecl>(ToFunction) - ->setIsCopyDeductionCandidate(Guide->isCopyDeductionCandidate()); + ->setDeductionCandidateKind(Guide->getDeductionCandidateKind()); } else { if (GetImportedOrCreateDecl( ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart, - NameInfo, T, TInfo, D->getStorageClass(), D->isInlineSpecified(), - D->hasWrittenPrototype(), D->getConstexprKind(), - TrailingRequiresClause)) + NameInfo, T, TInfo, D->getStorageClass(), D->UsesFPIntrin(), + D->isInlineSpecified(), D->hasWrittenPrototype(), + D->getConstexprKind(), TrailingRequiresClause)) return ToFunction; } @@ -3537,16 +3971,32 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { // decl and its redeclarations may be required. } + StringLiteral *Msg = D->getDeletedMessage(); + if (Msg) { + auto Imported = import(Msg); + if (!Imported) + return Imported.takeError(); + Msg = *Imported; + } + ToFunction->setQualifierInfo(ToQualifierLoc); ToFunction->setAccess(D->getAccess()); ToFunction->setLexicalDeclContext(LexicalDC); ToFunction->setVirtualAsWritten(D->isVirtualAsWritten()); ToFunction->setTrivial(D->isTrivial()); - ToFunction->setPure(D->isPure()); + ToFunction->setIsPureVirtual(D->isPureVirtual()); ToFunction->setDefaulted(D->isDefaulted()); ToFunction->setExplicitlyDefaulted(D->isExplicitlyDefaulted()); ToFunction->setDeletedAsWritten(D->isDeletedAsWritten()); + ToFunction->setFriendConstraintRefersToEnclosingTemplate( + D->FriendConstraintRefersToEnclosingTemplate()); ToFunction->setRangeEnd(ToEndLoc); + ToFunction->setDefaultLoc(ToDefaultLoc); + + if (Msg) + ToFunction->setDefaultedOrDeletedInfo( + FunctionDecl::DefaultedOrDeletedFunctionInfo::Create( + Importer.getToContext(), {}, Msg)); // Set the parameters. for (auto *Param : Parameters) { @@ -3591,6 +4041,15 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { } } + // If it is a template, import all related things. + if (Error Err = ImportTemplateInformation(D, ToFunction)) + return std::move(Err); + + if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D)) + if (Error Err = ImportOverriddenMethods(cast<CXXMethodDecl>(ToFunction), + FromCXXMethod)) + return std::move(Err); + if (D->doesThisDeclarationHaveABody()) { Error Err = ImportFunctionDeclBody(D, ToFunction); @@ -3604,21 +4063,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) { ToFunction->setType(*TyOrErr); else return TyOrErr.takeError(); + if (Expected<TypeSourceInfo *> TSIOrErr = import(D->getTypeSourceInfo())) + ToFunction->setTypeSourceInfo(*TSIOrErr); + else + return TSIOrErr.takeError(); } // FIXME: Other bits to merge? - // If it is a template, import all related things. 
- if (Error Err = ImportTemplateInformation(D, ToFunction)) - return std::move(Err); - addDeclToContexts(D, ToFunction); - if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D)) - if (Error Err = ImportOverriddenMethods(cast<CXXMethodDecl>(ToFunction), - FromCXXMethod)) - return std::move(Err); - // Import the rest of the chain. I.e. import all subsequent declarations. for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) { ExpectedDecl ToRedeclOrErr = import(*RedeclIt); @@ -3678,19 +4132,19 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) { // initializer of a FieldDecl might not had been instantiated in the // "To" context. However, the "From" context might instantiated that, // thus we have to merge that. + // Note: `hasInClassInitializer()` is not the same as non-null + // `getInClassInitializer()` value. if (Expr *FromInitializer = D->getInClassInitializer()) { - // We don't have yet the initializer set. - if (FoundField->hasInClassInitializer() && - !FoundField->getInClassInitializer()) { - if (ExpectedExpr ToInitializerOrErr = import(FromInitializer)) + if (ExpectedExpr ToInitializerOrErr = import(FromInitializer)) { + // Import of the FromInitializer may result in the setting of + // InClassInitializer. If not, set it here. + assert(FoundField->hasInClassInitializer() && + "Field should have an in-class initializer if it has an " + "expression for it."); + if (!FoundField->getInClassInitializer()) FoundField->setInClassInitializer(*ToInitializerOrErr); - else { - // We can't return error here, - // since we already mapped D as imported. - // FIXME: warning message? - consumeError(ToInitializerOrErr.takeError()); - return FoundField; - } + } else { + return ToInitializerOrErr.takeError(); } } return FoundField; @@ -3702,7 +4156,7 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) { Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here) << FoundField->getType(); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } } @@ -3711,7 +4165,6 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) { auto ToTInfo = importChecked(Err, D->getTypeSourceInfo()); auto ToBitWidth = importChecked(Err, D->getBitWidth()); auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart()); - auto ToInitializer = importChecked(Err, D->getInClassInitializer()); if (Err) return std::move(Err); const Type *ToCapturedVLAType = nullptr; @@ -3726,14 +4179,32 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) { D->getInClassInitStyle())) return ToField; + // We need [[no_unqiue_address]] attributes to be added to FieldDecl, before + // we add fields in CXXRecordDecl::addedMember, otherwise record will be + // marked as having non-zero size. + Err = Importer.ImportAttrs(ToField, D); + if (Err) + return std::move(Err); ToField->setAccess(D->getAccess()); ToField->setLexicalDeclContext(LexicalDC); - if (ToInitializer) - ToField->setInClassInitializer(ToInitializer); ToField->setImplicit(D->isImplicit()); if (ToCapturedVLAType) ToField->setCapturedVLAType(cast<VariableArrayType>(ToCapturedVLAType)); LexicalDC->addDeclInternal(ToField); + // Import initializer only after the field was created, it may have recursive + // reference to the field. 
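A default member initializer may refer back into the class being defined, and indirectly to the field it initializes, which is why the initializer is imported only after the FieldDecl exists; a minimal illustration with invented names:

    struct Widget {
      static int defaultId() { return 42; }
      // The initializers below look back into the still-open class,
      // including through 'this'.
      int id = defaultId();
      Widget *self = this;
    };

    Widget w;   // both initializers run when w is constructed

Attaching such an initializer before the field (and its record) exist on the "to" side could re-enter the import of the very field being created.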
+ auto ToInitializer = importChecked(Err, D->getInClassInitializer()); + if (Err) + return std::move(Err); + if (ToInitializer) { + auto *AlreadyImported = ToField->getInClassInitializer(); + if (AlreadyImported) + assert(ToInitializer == AlreadyImported && + "Duplicate import of in-class initializer."); + else + ToField->setInClassInitializer(ToInitializer); + } + return ToField; } @@ -3775,7 +4246,7 @@ ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) { Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here) << FoundField->getType(); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } } @@ -3815,22 +4286,34 @@ struct FriendCountAndPosition { unsigned int IndexOfDecl; }; -template <class T> -static FriendCountAndPosition getFriendCountAndPosition( - const FriendDecl *FD, - llvm::function_ref<T(const FriendDecl *)> GetCanTypeOrDecl) { +static bool IsEquivalentFriend(ASTImporter &Importer, FriendDecl *FD1, + FriendDecl *FD2) { + if ((!FD1->getFriendType()) != (!FD2->getFriendType())) + return false; + + if (const TypeSourceInfo *TSI = FD1->getFriendType()) + return Importer.IsStructurallyEquivalent( + TSI->getType(), FD2->getFriendType()->getType(), /*Complain=*/false); + + ASTImporter::NonEquivalentDeclSet NonEquivalentDecls; + StructuralEquivalenceContext Ctx( + FD1->getASTContext(), FD2->getASTContext(), NonEquivalentDecls, + StructuralEquivalenceKind::Default, + /* StrictTypeSpelling = */ false, /* Complain = */ false); + return Ctx.IsEquivalent(FD1, FD2); +} + +static FriendCountAndPosition getFriendCountAndPosition(ASTImporter &Importer, + FriendDecl *FD) { unsigned int FriendCount = 0; - llvm::Optional<unsigned int> FriendPosition; + std::optional<unsigned int> FriendPosition; const auto *RD = cast<CXXRecordDecl>(FD->getLexicalDeclContext()); - T TypeOrDecl = GetCanTypeOrDecl(FD); - - for (const FriendDecl *FoundFriend : RD->friends()) { + for (FriendDecl *FoundFriend : RD->friends()) { if (FoundFriend == FD) { FriendPosition = FriendCount; ++FriendCount; - } else if (!FoundFriend->getFriendDecl() == !FD->getFriendDecl() && - GetCanTypeOrDecl(FoundFriend) == TypeOrDecl) { + } else if (IsEquivalentFriend(Importer, FD, FoundFriend)) { ++FriendCount; } } @@ -3840,21 +4323,6 @@ static FriendCountAndPosition getFriendCountAndPosition( return {FriendCount, *FriendPosition}; } -static FriendCountAndPosition getFriendCountAndPosition(const FriendDecl *FD) { - if (FD->getFriendType()) - return getFriendCountAndPosition<QualType>(FD, [](const FriendDecl *F) { - if (TypeSourceInfo *TSI = F->getFriendType()) - return TSI->getType().getCanonicalType(); - llvm_unreachable("Wrong friend object type."); - }); - else - return getFriendCountAndPosition<Decl *>(FD, [](const FriendDecl *F) { - if (Decl *D = F->getFriendDecl()) - return D->getCanonicalDecl(); - llvm_unreachable("Wrong friend object type."); - }); -} - ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) { // Import the major distinguishing characteristics of a declaration. DeclContext *DC, *LexicalDC; @@ -3865,26 +4333,13 @@ ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) { // FriendDecl is not a NamedDecl so we cannot use lookup. // We try to maintain order and count of redundant friend declarations. 
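Because a class may legally repeat the same friend declaration, the importer has to count equivalent friends and remember each one's position instead of relying on name lookup; for example (class and function names are illustrative):

    class Helper;
    class Box;
    void share(Box &);

    class Box {
      friend class Helper;      // first friend declaration
      friend class Helper;      // redundant repetition: still well-formed
      friend void share(Box &);
      friend void share(Box &); // a friend may be declared more than once
      int contents = 0;
    };

Two structurally identical friends can only be told apart by their index among the equivalent ones, which is what FriendCountAndPosition records.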
const auto *RD = cast<CXXRecordDecl>(DC); - FriendDecl *ImportedFriend = RD->getFirstFriend(); SmallVector<FriendDecl *, 2> ImportedEquivalentFriends; - - while (ImportedFriend) { - bool Match = false; - if (D->getFriendDecl() && ImportedFriend->getFriendDecl()) { - Match = - IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(), - /*Complain=*/false); - } else if (D->getFriendType() && ImportedFriend->getFriendType()) { - Match = Importer.IsStructurallyEquivalent( - D->getFriendType()->getType(), - ImportedFriend->getFriendType()->getType(), /*Complain=*/false); - } - if (Match) + for (FriendDecl *ImportedFriend : RD->friends()) + if (IsEquivalentFriend(Importer, D, ImportedFriend)) ImportedEquivalentFriends.push_back(ImportedFriend); - ImportedFriend = ImportedFriend->getNextFriend(); - } - FriendCountAndPosition CountAndPosition = getFriendCountAndPosition(D); + FriendCountAndPosition CountAndPosition = + getFriendCountAndPosition(Importer, D); assert(ImportedEquivalentFriends.size() <= CountAndPosition.TotalCount && "Class with non-matching friends is imported, ODR check wrong?"); @@ -3966,7 +4421,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) { Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here) << FoundIvar->getType(); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } } @@ -4123,6 +4578,10 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) { ToVar->setQualifierInfo(ToQualifierLoc); ToVar->setAccess(D->getAccess()); ToVar->setLexicalDeclContext(LexicalDC); + if (D->isInlineSpecified()) + ToVar->setInlineSpecified(); + if (D->isInline()) + ToVar->setImplicitlyInline(); if (FoundByLookup) { auto *Recent = const_cast<VarDecl *>(FoundByLookup->getMostRecentDecl()); @@ -4134,6 +4593,17 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) { auto ToVTOrErr = import(D->getDescribedVarTemplate()); if (!ToVTOrErr) return ToVTOrErr.takeError(); + } else if (MemberSpecializationInfo *MSI = D->getMemberSpecializationInfo()) { + TemplateSpecializationKind SK = MSI->getTemplateSpecializationKind(); + VarDecl *FromInst = D->getInstantiatedFromStaticDataMember(); + if (Expected<VarDecl *> ToInstOrErr = import(FromInst)) + ToVar->setInstantiationOfStaticDataMember(*ToInstOrErr, SK); + else + return ToInstOrErr.takeError(); + if (ExpectedSLoc POIOrErr = import(MSI->getPointOfInstantiation())) + ToVar->getMemberSpecializationInfo()->setPointOfInstantiation(*POIOrErr); + else + return POIOrErr.takeError(); } if (Error Err = ImportInitializer(D, ToVar)) @@ -4178,6 +4648,8 @@ ExpectedDecl ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) { Error ASTNodeImporter::ImportDefaultArgOfParmVarDecl( const ParmVarDecl *FromParam, ParmVarDecl *ToParam) { ToParam->setHasInheritedDefaultArg(FromParam->hasInheritedDefaultArg()); + ToParam->setExplicitObjectParameterLoc( + FromParam->getExplicitObjectParamThisLoc()); ToParam->setKNRPromoted(FromParam->isKNRPromoted()); if (FromParam->hasUninstantiatedDefaultArg()) { @@ -4197,6 +4669,17 @@ Error ASTNodeImporter::ImportDefaultArgOfParmVarDecl( return Error::success(); } +Expected<InheritedConstructor> +ASTNodeImporter::ImportInheritedConstructor(const InheritedConstructor &From) { + Error Err = Error::success(); + CXXConstructorDecl *ToBaseCtor = importChecked(Err, From.getConstructor()); + ConstructorUsingShadowDecl *ToShadow = + importChecked(Err, From.getShadowDecl()); + if (Err) + return std::move(Err); + 
return InheritedConstructor(ToShadow, ToBaseCtor); +} + ExpectedDecl ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) { // Parameters are created in the translation unit's context, then moved // into the function declaration's context afterward. @@ -4263,7 +4746,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) { diag::note_odr_objc_method_here) << D->isInstanceMethod() << Name; - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } // Check the number of parameters. @@ -4275,7 +4758,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) { diag::note_odr_objc_method_here) << D->isInstanceMethod() << Name; - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } // Check parameter types. @@ -4291,7 +4774,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) { Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here) << (*FoundP)->getType(); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } } @@ -4304,7 +4787,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) { diag::note_odr_objc_method_here) << D->isInstanceMethod() << Name; - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } // FIXME: Any other bits we need to merge? @@ -4395,6 +4878,11 @@ ExpectedDecl ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) { ToColonLoc, ToTypeSourceInfo)) return Result; + // Only import 'ObjCTypeParamType' after the decl is created. + auto ToTypeForDecl = importChecked(Err, D->getTypeForDecl()); + if (Err) + return std::move(Err); + Result->setTypeForDecl(ToTypeForDecl); Result->setLexicalDeclContext(LexicalDC); return Result; } @@ -4693,13 +5181,14 @@ ExpectedDecl ASTNodeImporter::VisitUsingEnumDecl(UsingEnumDecl *D) { Error Err = Error::success(); auto ToUsingLoc = importChecked(Err, D->getUsingLoc()); auto ToEnumLoc = importChecked(Err, D->getEnumLoc()); - auto ToEnumDecl = importChecked(Err, D->getEnumDecl()); + auto ToNameLoc = importChecked(Err, D->getLocation()); + auto *ToEnumType = importChecked(Err, D->getEnumType()); if (Err) return std::move(Err); UsingEnumDecl *ToUsingEnum; if (GetImportedOrCreateDecl(ToUsingEnum, D, Importer.getToContext(), DC, - ToUsingLoc, ToEnumLoc, Loc, ToEnumDecl)) + ToUsingLoc, ToEnumLoc, ToNameLoc, ToEnumType)) return ToUsingEnum; ToUsingEnum->setLexicalDeclContext(LexicalDC); @@ -4736,9 +5225,29 @@ ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) { return ToTargetOrErr.takeError(); UsingShadowDecl *ToShadow; - if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc, - Name, *ToIntroducerOrErr, *ToTargetOrErr)) - return ToShadow; + if (auto *FromConstructorUsingShadow = + dyn_cast<ConstructorUsingShadowDecl>(D)) { + Error Err = Error::success(); + ConstructorUsingShadowDecl *Nominated = importChecked( + Err, FromConstructorUsingShadow->getNominatedBaseClassShadowDecl()); + if (Err) + return std::move(Err); + // The 'Target' parameter of ConstructorUsingShadowDecl constructor + // is really the "NominatedBaseClassShadowDecl" value if it exists + // (see code of ConstructorUsingShadowDecl::ConstructorUsingShadowDecl). + // We should pass the NominatedBaseClassShadowDecl to it (if non-null) to + // get the correct values. 
+ if (GetImportedOrCreateDecl<ConstructorUsingShadowDecl>( + ToShadow, D, Importer.getToContext(), DC, Loc, + cast<UsingDecl>(*ToIntroducerOrErr), + Nominated ? Nominated : *ToTargetOrErr, + FromConstructorUsingShadow->constructsVirtualBase())) + return ToShadow; + } else { + if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc, + Name, *ToIntroducerOrErr, *ToTargetOrErr)) + return ToShadow; + } ToShadow->setLexicalDeclContext(LexicalDC); ToShadow->setAccess(D->getAccess()); @@ -4798,6 +5307,35 @@ ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { return ToUsingDir; } +ExpectedDecl ASTNodeImporter::VisitUsingPackDecl(UsingPackDecl *D) { + DeclContext *DC, *LexicalDC; + DeclarationName Name; + SourceLocation Loc; + NamedDecl *ToD = nullptr; + if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc)) + return std::move(Err); + if (ToD) + return ToD; + + auto ToInstantiatedFromUsingOrErr = + Importer.Import(D->getInstantiatedFromUsingDecl()); + if (!ToInstantiatedFromUsingOrErr) + return ToInstantiatedFromUsingOrErr.takeError(); + SmallVector<NamedDecl *, 4> Expansions(D->expansions().size()); + if (Error Err = ImportArrayChecked(D->expansions(), Expansions.begin())) + return std::move(Err); + + UsingPackDecl *ToUsingPack; + if (GetImportedOrCreateDecl(ToUsingPack, D, Importer.getToContext(), DC, + cast<NamedDecl>(*ToInstantiatedFromUsingOrErr), + Expansions)) + return ToUsingPack; + + addDeclToContexts(D, ToUsingPack); + + return ToUsingPack; +} + ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl( UnresolvedUsingValueDecl *D) { DeclContext *DC, *LexicalDC; @@ -5182,7 +5720,7 @@ ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { Importer.FromDiag(D->getLocation(), diag::note_odr_objc_missing_superclass); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } } @@ -5221,7 +5759,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) { Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here) << FoundProp->getType(); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } // FIXME: Check property attributes, getters, setters, etc.? @@ -5326,7 +5864,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) { << D->getPropertyDecl()->getDeclName() << (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } // For @synthesize, check that we have the same @@ -5341,7 +5879,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) { diag::note_odr_objc_synthesize_ivar_here) << D->getPropertyIvarDecl()->getDeclName(); - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } // Merge the existing implementation with the new implementation. 
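The hunks above (ImportInheritedConstructor, VisitObjCTypeParamDecl, VisitUsingEnumDecl, and the ConstructorUsingShadowDecl branch) all lean on the same importChecked idiom: every sub-import funnels its failure into one shared llvm::Error, and a single `if (Err)` decides whether to bail out. Below is a minimal sketch of that idiom, assuming only LLVM's Error/Expected machinery; importCheckedSketch and ImportOne are illustrative names, not the actual ASTNodeImporter members.

#include "llvm/Support/Error.h"

// Sketch only: once Err holds a failure, later calls become no-ops, so a
// whole batch of imports can be validated with one check at the end.
template <typename T, typename ImportFn>
static T importCheckedSketch(llvm::Error &Err, const T &From,
                             ImportFn ImportOne) {
  if (Err)
    return T{};                  // an earlier import already failed
  llvm::Expected<T> MaybeVal = ImportOne(From);
  if (!MaybeVal) {
    Err = MaybeVal.takeError();  // remember the first failure
    return T{};
  }
  return *MaybeVal;
}

A caller then mirrors the shape seen in the patch: start with `llvm::Error Err = llvm::Error::success();`, run the sequence of imports, and `return std::move(Err);` only if the combined check fires.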
@@ -5379,36 +5917,20 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) { if (const TypeConstraint *TC = D->getTypeConstraint()) { Error Err = Error::success(); - auto ToNNS = importChecked(Err, TC->getNestedNameSpecifierLoc()); - auto ToName = importChecked(Err, TC->getConceptNameInfo().getName()); - auto ToNameLoc = importChecked(Err, TC->getConceptNameInfo().getLoc()); - auto ToFoundDecl = importChecked(Err, TC->getFoundDecl()); - auto ToNamedConcept = importChecked(Err, TC->getNamedConcept()); + auto ToConceptRef = importChecked(Err, TC->getConceptReference()); auto ToIDC = importChecked(Err, TC->getImmediatelyDeclaredConstraint()); if (Err) return std::move(Err); - TemplateArgumentListInfo ToTAInfo; - const auto *ASTTemplateArgs = TC->getTemplateArgsAsWritten(); - if (ASTTemplateArgs) - if (Error Err = ImportTemplateArgumentListInfo(*ASTTemplateArgs, - ToTAInfo)) - return std::move(Err); - - ToD->setTypeConstraint(ToNNS, DeclarationNameInfo(ToName, ToNameLoc), - ToFoundDecl, ToNamedConcept, - ASTTemplateArgs ? - ASTTemplateArgumentListInfo::Create(Importer.getToContext(), - ToTAInfo) : nullptr, - ToIDC); + ToD->setTypeConstraint(ToConceptRef, ToIDC); } if (D->hasDefaultArgument()) { - Expected<TypeSourceInfo *> ToDefaultArgOrErr = - import(D->getDefaultArgumentInfo()); + Expected<TemplateArgumentLoc> ToDefaultArgOrErr = + import(D->getDefaultArgument()); if (!ToDefaultArgOrErr) return ToDefaultArgOrErr.takeError(); - ToD->setDefaultArgument(*ToDefaultArgOrErr); + ToD->setDefaultArgument(ToD->getASTContext(), *ToDefaultArgOrErr); } return ToD; @@ -5436,10 +5958,11 @@ ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) { return ToD; if (D->hasDefaultArgument()) { - ExpectedExpr ToDefaultArgOrErr = import(D->getDefaultArgument()); + Expected<TemplateArgumentLoc> ToDefaultArgOrErr = + import(D->getDefaultArgument()); if (!ToDefaultArgOrErr) return ToDefaultArgOrErr.takeError(); - ToD->setDefaultArgument(*ToDefaultArgOrErr); + ToD->setDefaultArgument(Importer.getToContext(), *ToDefaultArgOrErr); } return ToD; @@ -5467,7 +5990,8 @@ ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { ToD, D, Importer.getToContext(), Importer.getToContext().getTranslationUnitDecl(), *LocationOrErr, D->getDepth(), D->getPosition(), D->isParameterPack(), - (*NameOrErr).getAsIdentifierInfo(), *TemplateParamsOrErr)) + (*NameOrErr).getAsIdentifierInfo(), D->wasDeclaredWithTypename(), + *TemplateParamsOrErr)) return ToD; if (D->hasDefaultArgument()) { @@ -5504,6 +6028,18 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) { if (ToD) return ToD; + // Should check if a declaration is friend in a dependent context. + // Such templates are not linked together in a declaration chain. + // The ASTImporter strategy is to map existing forward declarations to + // imported ones only if strictly necessary, otherwise import these as new + // forward declarations. In case of the "dependent friend" declarations, new + // declarations are created, but not linked in a declaration chain. + auto IsDependentFriend = [](ClassTemplateDecl *TD) { + return TD->getFriendObjectKind() != Decl::FOK_None && + TD->getLexicalDeclContext()->isDependentContext(); + }; + bool DependentFriend = IsDependentFriend(D); + ClassTemplateDecl *FoundByLookup = nullptr; // We may already have a template of the same name; try to find and match it. 
@@ -5521,7 +6057,15 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) { if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D)) continue; - if (IsStructuralMatch(D, FoundTemplate)) { + // FIXME: sufficient conditon for 'IgnoreTemplateParmDepth'? + bool IgnoreTemplateParmDepth = + (FoundTemplate->getFriendObjectKind() != Decl::FOK_None) != + (D->getFriendObjectKind() != Decl::FOK_None); + if (IsStructuralMatch(D, FoundTemplate, /*Complain=*/true, + IgnoreTemplateParmDepth)) { + if (DependentFriend || IsDependentFriend(FoundTemplate)) + continue; + ClassTemplateDecl *TemplateWithDef = getTemplateDefinition(FoundTemplate); if (D->isThisDeclarationADefinition() && TemplateWithDef) @@ -5596,11 +6140,6 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) { D2->setPreviousDecl(Recent); } - if (FromTemplated->isCompleteDefinition() && - !ToTemplated->isCompleteDefinition()) { - // FIXME: Import definition! - } - return D2; } @@ -5617,8 +6156,8 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( // Import template arguments. SmallVector<TemplateArgument, 2> TemplateArgs; - if (Error Err = ImportTemplateArguments( - D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs)) + if (Error Err = + ImportTemplateArguments(D->getTemplateArgs().asArray(), TemplateArgs)) return std::move(Err); // Try to find an existing specialization with these template arguments and // template parameter list. @@ -5671,7 +6210,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( } } else { // ODR violation. // FIXME HandleNameConflict - return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); } } @@ -5683,15 +6222,16 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( if (!IdLocOrErr) return IdLocOrErr.takeError(); + // Import TemplateArgumentListInfo. + TemplateArgumentListInfo ToTAInfo; + if (const auto *ASTTemplateArgs = D->getTemplateArgsAsWritten()) { + if (Error Err = ImportTemplateArgumentListInfo(*ASTTemplateArgs, ToTAInfo)) + return std::move(Err); + } + // Create the specialization. ClassTemplateSpecializationDecl *D2 = nullptr; if (PartialSpec) { - // Import TemplateArgumentListInfo. - TemplateArgumentListInfo ToTAInfo; - const auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten(); - if (Error Err = ImportTemplateArgumentListInfo(ASTTemplateArgs, ToTAInfo)) - return std::move(Err); - QualType CanonInjType; if (Error Err = importInto( CanonInjType, PartialSpec->getInjectedSpecializationType())) @@ -5699,10 +6239,10 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( CanonInjType = CanonInjType.getCanonicalType(); if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>( - D2, D, Importer.getToContext(), D->getTagKind(), DC, - *BeginLocOrErr, *IdLocOrErr, ToTPList, ClassTemplate, - llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()), - ToTAInfo, CanonInjType, + D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr, + *IdLocOrErr, ToTPList, ClassTemplate, + llvm::ArrayRef(TemplateArgs.data(), TemplateArgs.size()), + CanonInjType, cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl))) return D2; @@ -5713,6 +6253,11 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( InsertPos)) // Add this partial specialization to the class template. 
ClassTemplate->AddPartialSpecialization(PartSpec2, InsertPos); + if (Expected<ClassTemplatePartialSpecializationDecl *> ToInstOrErr = + import(PartialSpec->getInstantiatedFromMember())) + PartSpec2->setInstantiatedFromMember(*ToInstOrErr); + else + return ToInstOrErr.takeError(); updateLookupTableForTemplateParameters(*ToTPList); } else { // Not a partial specialization. @@ -5744,28 +6289,27 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( else return BraceRangeOrErr.takeError(); + if (Error Err = ImportTemplateParameterLists(D, D2)) + return std::move(Err); + // Import the qualifier, if any. if (auto LocOrErr = import(D->getQualifierLoc())) D2->setQualifierInfo(*LocOrErr); else return LocOrErr.takeError(); - if (auto *TSI = D->getTypeAsWritten()) { - if (auto TInfoOrErr = import(TSI)) - D2->setTypeAsWritten(*TInfoOrErr); - else - return TInfoOrErr.takeError(); + if (D->getTemplateArgsAsWritten()) + D2->setTemplateArgsAsWritten(ToTAInfo); - if (auto LocOrErr = import(D->getTemplateKeywordLoc())) - D2->setTemplateKeywordLoc(*LocOrErr); - else - return LocOrErr.takeError(); + if (auto LocOrErr = import(D->getTemplateKeywordLoc())) + D2->setTemplateKeywordLoc(*LocOrErr); + else + return LocOrErr.takeError(); - if (auto LocOrErr = import(D->getExternLoc())) - D2->setExternLoc(*LocOrErr); - else - return LocOrErr.takeError(); - } + if (auto LocOrErr = import(D->getExternKeywordLoc())) + D2->setExternKeywordLoc(*LocOrErr); + else + return LocOrErr.takeError(); if (D->getPointOfInstantiation().isValid()) { if (auto POIOrErr = import(D->getPointOfInstantiation())) @@ -5776,6 +6320,30 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl( D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind()); + if (auto P = D->getInstantiatedFrom()) { + if (auto *CTD = P.dyn_cast<ClassTemplateDecl *>()) { + if (auto CTDorErr = import(CTD)) + D2->setInstantiationOf(*CTDorErr); + } else { + auto *CTPSD = cast<ClassTemplatePartialSpecializationDecl *>(P); + auto CTPSDOrErr = import(CTPSD); + if (!CTPSDOrErr) + return CTPSDOrErr.takeError(); + const TemplateArgumentList &DArgs = D->getTemplateInstantiationArgs(); + SmallVector<TemplateArgument, 2> D2ArgsVec(DArgs.size()); + for (unsigned I = 0; I < DArgs.size(); ++I) { + const TemplateArgument &DArg = DArgs[I]; + if (auto ArgOrErr = import(DArg)) + D2ArgsVec[I] = *ArgOrErr; + else + return ArgOrErr.takeError(); + } + D2->setInstantiationOf( + *CTPSDOrErr, + TemplateArgumentList::CreateCopy(Importer.getToContext(), D2ArgsVec)); + } + } + if (D->isCompleteDefinition()) if (Error Err = ImportDefinition(D, D2)) return std::move(Err); @@ -5811,14 +6379,21 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) { D->getTemplatedDecl())) continue; if (IsStructuralMatch(D, FoundTemplate)) { - // The Decl in the "From" context has a definition, but in the - // "To" context we already have a definition. + // FIXME Check for ODR error if the two definitions have + // different initializers? VarTemplateDecl *FoundDef = getTemplateDefinition(FoundTemplate); - if (D->isThisDeclarationADefinition() && FoundDef) - // FIXME Check for ODR error if the two definitions have - // different initializers? 
- return Importer.MapImported(D, FoundDef); - + if (D->getDeclContext()->isRecord()) { + assert(FoundTemplate->getDeclContext()->isRecord() && + "Member variable template imported as non-member, " + "inconsistent imported AST?"); + if (FoundDef) + return Importer.MapImported(D, FoundDef); + if (!D->isThisDeclarationADefinition()) + return Importer.MapImported(D, FoundTemplate); + } else { + if (FoundDef && D->isThisDeclarationADefinition()) + return Importer.MapImported(D, FoundDef); + } FoundByLookup = FoundTemplate; break; } @@ -5879,26 +6454,24 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) { ToVarTD->setPreviousDecl(Recent); } - if (DTemplated->isThisDeclarationADefinition() && - !ToTemplated->isThisDeclarationADefinition()) { - // FIXME: Import definition! - } - return ToVarTD; } ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *D) { - // If this record has a definition in the translation unit we're coming from, - // but this particular declaration is not that definition, import the - // definition and map to that. - VarDecl *Definition = D->getDefinition(); - if (Definition && Definition != D) { - if (ExpectedDecl ImportedDefOrErr = import(Definition)) - return Importer.MapImported(D, *ImportedDefOrErr); - else - return ImportedDefOrErr.takeError(); + // A VarTemplateSpecializationDecl inherits from VarDecl, the import is done + // in an analog way (but specialized for this case). + + SmallVector<Decl *, 2> Redecls = getCanonicalForwardRedeclChain(D); + auto RedeclIt = Redecls.begin(); + // Import the first part of the decl chain. I.e. import all previous + // declarations starting from the canonical decl. + for (; RedeclIt != Redecls.end() && *RedeclIt != D; ++RedeclIt) { + ExpectedDecl RedeclOrErr = import(*RedeclIt); + if (!RedeclOrErr) + return RedeclOrErr.takeError(); } + assert(*RedeclIt == D); VarTemplateDecl *VarTemplate = nullptr; if (Error Err = importInto(VarTemplate, D->getSpecializedTemplate())) @@ -5920,120 +6493,135 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl( // Import template arguments. SmallVector<TemplateArgument, 2> TemplateArgs; - if (Error Err = ImportTemplateArguments( - D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs)) + if (Error Err = + ImportTemplateArguments(D->getTemplateArgs().asArray(), TemplateArgs)) return std::move(Err); // Try to find an existing specialization with these template arguments. void *InsertPos = nullptr; - VarTemplateSpecializationDecl *D2 = VarTemplate->findSpecialization( - TemplateArgs, InsertPos); - if (D2) { - // We already have a variable template specialization with these template - // arguments. - - // FIXME: Check for specialization vs. instantiation errors. - - if (VarDecl *FoundDef = D2->getDefinition()) { - if (!D->isThisDeclarationADefinition() || - IsStructuralMatch(D, FoundDef)) { - // The record types structurally match, or the "from" translation - // unit only had a forward declaration anyway; call it the same - // variable. - return Importer.MapImported(D, FoundDef); + VarTemplateSpecializationDecl *FoundSpecialization = + VarTemplate->findSpecialization(TemplateArgs, InsertPos); + if (FoundSpecialization) { + if (IsStructuralMatch(D, FoundSpecialization)) { + VarDecl *FoundDef = FoundSpecialization->getDefinition(); + if (D->getDeclContext()->isRecord()) { + // In a record, it is allowed only to have one optional declaration and + // one definition of the (static or constexpr) variable template. 
+ assert( + FoundSpecialization->getDeclContext()->isRecord() && + "Member variable template specialization imported as non-member, " + "inconsistent imported AST?"); + if (FoundDef) + return Importer.MapImported(D, FoundDef); + if (!D->isThisDeclarationADefinition()) + return Importer.MapImported(D, FoundSpecialization); + } else { + // If definition is imported and there is already one, map to it. + // Otherwise create a new variable and link it to the existing. + if (FoundDef && D->isThisDeclarationADefinition()) + return Importer.MapImported(D, FoundDef); } + } else { + return make_error<ASTImportError>(ASTImportError::NameConflict); } - } else { - // Import the type. - QualType T; - if (Error Err = importInto(T, D->getType())) - return std::move(Err); + } - auto TInfoOrErr = import(D->getTypeSourceInfo()); - if (!TInfoOrErr) - return TInfoOrErr.takeError(); + VarTemplateSpecializationDecl *D2 = nullptr; - TemplateArgumentListInfo ToTAInfo; - if (Error Err = ImportTemplateArgumentListInfo( - D->getTemplateArgsInfo(), ToTAInfo)) + TemplateArgumentListInfo ToTAInfo; + if (const auto *Args = D->getTemplateArgsAsWritten()) { + if (Error Err = ImportTemplateArgumentListInfo(*Args, ToTAInfo)) return std::move(Err); + } - using PartVarSpecDecl = VarTemplatePartialSpecializationDecl; - // Create a new specialization. - if (auto *FromPartial = dyn_cast<PartVarSpecDecl>(D)) { - // Import TemplateArgumentListInfo - TemplateArgumentListInfo ArgInfos; - const auto *FromTAArgsAsWritten = FromPartial->getTemplateArgsAsWritten(); - // NOTE: FromTAArgsAsWritten and template parameter list are non-null. - if (Error Err = ImportTemplateArgumentListInfo( - *FromTAArgsAsWritten, ArgInfos)) - return std::move(Err); + using PartVarSpecDecl = VarTemplatePartialSpecializationDecl; + // Create a new specialization. + if (auto *FromPartial = dyn_cast<PartVarSpecDecl>(D)) { + auto ToTPListOrErr = import(FromPartial->getTemplateParameters()); + if (!ToTPListOrErr) + return ToTPListOrErr.takeError(); - auto ToTPListOrErr = import(FromPartial->getTemplateParameters()); - if (!ToTPListOrErr) - return ToTPListOrErr.takeError(); + PartVarSpecDecl *ToPartial; + if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC, + *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr, + VarTemplate, QualType(), nullptr, + D->getStorageClass(), TemplateArgs)) + return ToPartial; - PartVarSpecDecl *ToPartial; - if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC, - *BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr, - VarTemplate, T, *TInfoOrErr, - D->getStorageClass(), TemplateArgs, ArgInfos)) - return ToPartial; + if (Expected<PartVarSpecDecl *> ToInstOrErr = + import(FromPartial->getInstantiatedFromMember())) + ToPartial->setInstantiatedFromMember(*ToInstOrErr); + else + return ToInstOrErr.takeError(); - if (Expected<PartVarSpecDecl *> ToInstOrErr = import( - FromPartial->getInstantiatedFromMember())) - ToPartial->setInstantiatedFromMember(*ToInstOrErr); - else - return ToInstOrErr.takeError(); - - if (FromPartial->isMemberSpecialization()) - ToPartial->setMemberSpecialization(); - - D2 = ToPartial; - - // FIXME: Use this update if VarTemplatePartialSpecializationDecl is fixed - // to adopt template parameters. 
- // updateLookupTableForTemplateParameters(**ToTPListOrErr); - } else { // Full specialization - if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, - *BeginLocOrErr, *IdLocOrErr, VarTemplate, - T, *TInfoOrErr, - D->getStorageClass(), TemplateArgs)) - return D2; - } + if (FromPartial->isMemberSpecialization()) + ToPartial->setMemberSpecialization(); - if (D->getPointOfInstantiation().isValid()) { - if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation())) - D2->setPointOfInstantiation(*POIOrErr); - else - return POIOrErr.takeError(); - } + D2 = ToPartial; - D2->setSpecializationKind(D->getSpecializationKind()); - D2->setTemplateArgsInfo(ToTAInfo); + // FIXME: Use this update if VarTemplatePartialSpecializationDecl is fixed + // to adopt template parameters. + // updateLookupTableForTemplateParameters(**ToTPListOrErr); + } else { // Full specialization + if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC, + *BeginLocOrErr, *IdLocOrErr, VarTemplate, + QualType(), nullptr, D->getStorageClass(), + TemplateArgs)) + return D2; + } - // Add this specialization to the class template. + // Update InsertPos, because preceding import calls may have invalidated + // it by adding new specializations. + if (!VarTemplate->findSpecialization(TemplateArgs, InsertPos)) VarTemplate->AddSpecialization(D2, InsertPos); - // Import the qualifier, if any. - if (auto LocOrErr = import(D->getQualifierLoc())) - D2->setQualifierInfo(*LocOrErr); + QualType T; + if (Error Err = importInto(T, D->getType())) + return std::move(Err); + D2->setType(T); + + auto TInfoOrErr = import(D->getTypeSourceInfo()); + if (!TInfoOrErr) + return TInfoOrErr.takeError(); + D2->setTypeSourceInfo(*TInfoOrErr); + + if (D->getPointOfInstantiation().isValid()) { + if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation())) + D2->setPointOfInstantiation(*POIOrErr); else - return LocOrErr.takeError(); + return POIOrErr.takeError(); + } - if (D->isConstexpr()) - D2->setConstexpr(true); + D2->setSpecializationKind(D->getSpecializationKind()); - // Add the specialization to this context. - D2->setLexicalDeclContext(LexicalDC); - LexicalDC->addDeclInternal(D2); + if (D->getTemplateArgsAsWritten()) + D2->setTemplateArgsAsWritten(ToTAInfo); - D2->setAccess(D->getAccess()); - } + if (auto LocOrErr = import(D->getQualifierLoc())) + D2->setQualifierInfo(*LocOrErr); + else + return LocOrErr.takeError(); + + if (D->isConstexpr()) + D2->setConstexpr(true); + + D2->setAccess(D->getAccess()); if (Error Err = ImportInitializer(D, D2)) return std::move(Err); + if (FoundSpecialization) + D2->setPreviousDecl(FoundSpecialization->getMostRecentDecl()); + + addDeclToContexts(D, D2); + + // Import the rest of the chain. I.e. import all subsequent declarations. + for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) { + ExpectedDecl RedeclOrErr = import(*RedeclIt); + if (!RedeclOrErr) + return RedeclOrErr.takeError(); + } + return D2; } @@ -6088,20 +6676,24 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { if (Error Err = importInto(TemplatedFD, D->getTemplatedDecl())) return std::move(Err); - // Template parameters of the ClassTemplateDecl and FunctionTemplateDecl are - // shared, if the FunctionTemplateDecl is a deduction guide for the class. - // At import the ClassTemplateDecl object is always created first (FIXME: is - // this really true?) because the dependency, then the FunctionTemplateDecl. 
- // The DeclContext of the template parameters is changed when the - // FunctionTemplateDecl is created, but was set already when the class - // template was created. So here it is not the TU (default value) any more. - // FIXME: The DeclContext of the parameters is now set finally to the - // CXXDeductionGuideDecl object that was imported later. This may not be the - // same that is in the original AST, specially if there are multiple deduction - // guides. - DeclContext *OldParamDC = nullptr; - if (Params->size() > 0) - OldParamDC = Params->getParam(0)->getDeclContext(); + // At creation of the template the template parameters are "adopted" + // (DeclContext is changed). After this possible change the lookup table + // must be updated. + // At deduction guides the DeclContext of the template parameters may be + // different from what we would expect, it may be the class template, or a + // probably different CXXDeductionGuideDecl. This may come from the fact that + // the template parameter objects may be shared between deduction guides or + // the class template, and at creation of multiple FunctionTemplateDecl + // objects (for deduction guides) the same parameters are re-used. The + // "adoption" happens multiple times with different parent, even recursively + // for TemplateTemplateParmDecl. The same happens at import when the + // FunctionTemplateDecl objects are created, but in different order. + // In this way the DeclContext of these template parameters is not necessarily + // the same as in the "from" context. + SmallVector<DeclContext *, 2> OldParamDC; + OldParamDC.reserve(Params->size()); + llvm::transform(*Params, std::back_inserter(OldParamDC), + [](NamedDecl *ND) { return ND->getDeclContext(); }); FunctionTemplateDecl *ToFunc; if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name, @@ -6112,8 +6704,13 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { ToFunc->setAccess(D->getAccess()); ToFunc->setLexicalDeclContext(LexicalDC); - LexicalDC->addDeclInternal(ToFunc); - updateLookupTableForTemplateParameters(*Params, OldParamDC); + addDeclToContexts(D, ToFunc); + + ASTImporterLookupTable *LT = Importer.SharedState->getLookupTable(); + if (LT && !OldParamDC.empty()) { + for (unsigned int I = 0; I < OldParamDC.size(); ++I) + LT->updateForced(Params->getParam(I), OldParamDC[I]); + } if (FoundByLookup) { auto *Recent = @@ -6139,13 +6736,13 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { ExpectedStmt ASTNodeImporter::VisitStmt(Stmt *S) { Importer.FromDiag(S->getBeginLoc(), diag::err_unsupported_ast_node) << S->getStmtClassName(); - return make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) { if (Importer.returnWithErrorInTest()) - return make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); SmallVector<IdentifierInfo *, 4> Names; for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) { IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I)); @@ -6258,9 +6855,10 @@ ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) { if (!ToRBracLocOrErr) return ToRBracLocOrErr.takeError(); - return CompoundStmt::Create( - Importer.getToContext(), ToStmts, - *ToLBracLocOrErr, *ToRBracLocOrErr); + FPOptionsOverride FPO = + S->hasStoredFPFeatures() ? 
S->getStoredFPFeatures() : FPOptionsOverride(); + return CompoundStmt::Create(Importer.getToContext(), ToStmts, FPO, + *ToLBracLocOrErr, *ToRBracLocOrErr); } ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) { @@ -6339,7 +6937,7 @@ ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) { if (Err) return std::move(Err); - return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(), + return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->getStatementKind(), ToInit, ToConditionVariable, ToCond, ToLParenLoc, ToRParenLoc, ToThen, ToElseLoc, ToElse); } @@ -6515,8 +7113,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) { return ToHandlerOrErr.takeError(); } - return CXXTryStmt::Create( - Importer.getToContext(), *ToTryLocOrErr,*ToTryBlockOrErr, ToHandlers); + return CXXTryStmt::Create(Importer.getToContext(), *ToTryLocOrErr, + cast<CompoundStmt>(*ToTryBlockOrErr), ToHandlers); } ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) { @@ -6652,11 +7250,12 @@ ExpectedStmt ASTNodeImporter::VisitObjCAutoreleasePoolStmt( ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) { Importer.FromDiag(E->getBeginLoc(), diag::err_unsupported_ast_node) << E->getStmtClassName(); - return make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } ExpectedStmt ASTNodeImporter::VisitSourceLocExpr(SourceLocExpr *E) { Error Err = Error::success(); + auto ToType = importChecked(Err, E->getType()); auto BLoc = importChecked(Err, E->getBeginLoc()); auto RParenLoc = importChecked(Err, E->getEndLoc()); if (Err) @@ -6666,8 +7265,8 @@ ExpectedStmt ASTNodeImporter::VisitSourceLocExpr(SourceLocExpr *E) { return ParentContextOrErr.takeError(); return new (Importer.getToContext()) - SourceLocExpr(Importer.getToContext(), E->getIdentKind(), BLoc, RParenLoc, - *ParentContextOrErr); + SourceLocExpr(Importer.getToContext(), E->getIdentKind(), ToType, BLoc, + RParenLoc, *ParentContextOrErr); } ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) { @@ -6710,6 +7309,39 @@ ExpectedStmt ASTNodeImporter::VisitChooseExpr(ChooseExpr *E) { ToRParenLoc, CondIsTrue); } +ExpectedStmt ASTNodeImporter::VisitConvertVectorExpr(ConvertVectorExpr *E) { + Error Err = Error::success(); + auto *ToSrcExpr = importChecked(Err, E->getSrcExpr()); + auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); + auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc()); + auto ToType = importChecked(Err, E->getType()); + auto *ToTSI = importChecked(Err, E->getTypeSourceInfo()); + if (Err) + return std::move(Err); + + return new (Importer.getToContext()) + ConvertVectorExpr(ToSrcExpr, ToTSI, ToType, E->getValueKind(), + E->getObjectKind(), ToBuiltinLoc, ToRParenLoc); +} + +ExpectedStmt ASTNodeImporter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { + Error Err = Error::success(); + auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); + auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); + auto ToType = importChecked(Err, E->getType()); + const unsigned NumSubExprs = E->getNumSubExprs(); + + llvm::SmallVector<Expr *, 8> ToSubExprs; + llvm::ArrayRef<Expr *> FromSubExprs(E->getSubExprs(), NumSubExprs); + ToSubExprs.resize(NumSubExprs); + + if ((Err = ImportContainerChecked(FromSubExprs, ToSubExprs))) + return std::move(Err); + + return new (Importer.getToContext()) ShuffleVectorExpr( + Importer.getToContext(), ToSubExprs, ToType, ToBeginLoc, ToRParenLoc); +} + ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) 
{ ExpectedType TypeOrErr = import(E->getType()); if (!TypeOrErr) @@ -6726,7 +7358,14 @@ ExpectedStmt ASTNodeImporter::VisitGenericSelectionExpr(GenericSelectionExpr *E) { Error Err = Error::success(); auto ToGenericLoc = importChecked(Err, E->getGenericLoc()); - auto *ToControllingExpr = importChecked(Err, E->getControllingExpr()); + Expr *ToControllingExpr = nullptr; + TypeSourceInfo *ToControllingType = nullptr; + if (E->isExprPredicate()) + ToControllingExpr = importChecked(Err, E->getControllingExpr()); + else + ToControllingType = importChecked(Err, E->getControllingType()); + assert((ToControllingExpr || ToControllingType) && + "Either the controlling expr or type must be nonnull"); auto ToDefaultLoc = importChecked(Err, E->getDefaultLoc()); auto ToRParenLoc = importChecked(Err, E->getRParenLoc()); if (Err) @@ -6744,15 +7383,27 @@ ASTNodeImporter::VisitGenericSelectionExpr(GenericSelectionExpr *E) { const ASTContext &ToCtx = Importer.getToContext(); if (E->isResultDependent()) { + if (ToControllingExpr) { + return GenericSelectionExpr::Create( + ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes), + llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc, + E->containsUnexpandedParameterPack()); + } return GenericSelectionExpr::Create( - ToCtx, ToGenericLoc, ToControllingExpr, - llvm::makeArrayRef(ToAssocTypes), llvm::makeArrayRef(ToAssocExprs), - ToDefaultLoc, ToRParenLoc, E->containsUnexpandedParameterPack()); + ToCtx, ToGenericLoc, ToControllingType, llvm::ArrayRef(ToAssocTypes), + llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc, + E->containsUnexpandedParameterPack()); } + if (ToControllingExpr) { + return GenericSelectionExpr::Create( + ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes), + llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc, + E->containsUnexpandedParameterPack(), E->getResultIndex()); + } return GenericSelectionExpr::Create( - ToCtx, ToGenericLoc, ToControllingExpr, llvm::makeArrayRef(ToAssocTypes), - llvm::makeArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc, + ToCtx, ToGenericLoc, ToControllingType, llvm::ArrayRef(ToAssocTypes), + llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc, E->containsUnexpandedParameterPack(), E->getResultIndex()); } @@ -6766,7 +7417,8 @@ ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) { return std::move(Err); return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType, - E->getIdentKind(), ToFunctionName); + E->getIdentKind(), E->isTransparent(), + ToFunctionName); } ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) { @@ -6804,6 +7456,7 @@ ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) { E->getValueKind(), ToFoundD, ToResInfo, E->isNonOdrUse()); if (E->hadMultipleCandidates()) ToE->setHadMultipleCandidates(true); + ToE->setIsImmediateEscalating(E->isImmediateEscalating()); return ToE; } @@ -7047,10 +7700,17 @@ ExpectedStmt ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) { if (Err) return std::move(Err); - return UnaryOperator::Create( - Importer.getToContext(), ToSubExpr, E->getOpcode(), ToType, - E->getValueKind(), E->getObjectKind(), ToOperatorLoc, E->canOverflow(), - E->getFPOptionsOverride()); + auto *UO = UnaryOperator::CreateEmpty(Importer.getToContext(), + E->hasStoredFPFeatures()); + UO->setType(ToType); + UO->setSubExpr(ToSubExpr); + UO->setOpcode(E->getOpcode()); + UO->setOperatorLoc(ToOperatorLoc); + UO->setCanOverflow(E->canOverflow()); + if (E->hasStoredFPFeatures()) + 
UO->setStoredFPFeatures(E->getStoredFPFeatures()); + + return UO; } ExpectedStmt @@ -7094,7 +7754,7 @@ ExpectedStmt ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) { return BinaryOperator::Create( Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(), ToOperatorLoc, - E->getFPFeatures(Importer.getFromContext().getLangOpts())); + E->getFPFeatures()); } ExpectedStmt ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) { @@ -7133,6 +7793,17 @@ ASTNodeImporter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) { E->getObjectKind()); } +ExpectedStmt ASTNodeImporter::VisitCXXRewrittenBinaryOperator( + CXXRewrittenBinaryOperator *E) { + Error Err = Error::success(); + auto ToSemanticForm = importChecked(Err, E->getSemanticForm()); + if (Err) + return std::move(Err); + + return new (Importer.getToContext()) + CXXRewrittenBinaryOperator(ToSemanticForm, E->isReversed()); +} + ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) { Error Err = Error::success(); auto ToBeginLoc = importChecked(Err, E->getBeginLoc()); @@ -7205,7 +7876,7 @@ ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) { return CompoundAssignOperator::Create( Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType, E->getValueKind(), E->getObjectKind(), ToOperatorLoc, - E->getFPFeatures(Importer.getFromContext().getLangOpts()), + E->getFPFeatures(), ToComputationLHSType, ToComputationResultType); } @@ -7293,9 +7964,21 @@ ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) { *ToLParenLocOrErr, OCE->getBridgeKind(), E->getCastKind(), *ToBridgeKeywordLocOrErr, ToTypeInfoAsWritten, ToSubExpr); } + case Stmt::BuiltinBitCastExprClass: { + auto *BBC = cast<BuiltinBitCastExpr>(E); + ExpectedSLoc ToKWLocOrErr = import(BBC->getBeginLoc()); + if (!ToKWLocOrErr) + return ToKWLocOrErr.takeError(); + ExpectedSLoc ToRParenLocOrErr = import(BBC->getEndLoc()); + if (!ToRParenLocOrErr) + return ToRParenLocOrErr.takeError(); + return new (Importer.getToContext()) BuiltinBitCastExpr( + ToType, E->getValueKind(), E->getCastKind(), ToSubExpr, + ToTypeInfoAsWritten, *ToKWLocOrErr, *ToRParenLocOrErr); + } default: llvm_unreachable("Cast expression of unsupported type!"); - return make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } } @@ -7414,15 +8097,23 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { // see VisitParmVarDecl). 
ParmVarDecl *ToParam = *ToParamOrErr; if (!ToParam->getDefaultArg()) { - Optional<ParmVarDecl *> FromParam = Importer.getImportedFromDecl(ToParam); + std::optional<ParmVarDecl *> FromParam = + Importer.getImportedFromDecl(ToParam); assert(FromParam && "ParmVarDecl was not imported?"); if (Error Err = ImportDefaultArgOfParmVarDecl(*FromParam, ToParam)) return std::move(Err); } - + Expr *RewrittenInit = nullptr; + if (E->hasRewrittenInit()) { + ExpectedExpr ExprOrErr = import(E->getRewrittenExpr()); + if (!ExprOrErr) + return ExprOrErr.takeError(); + RewrittenInit = ExprOrErr.get(); + } return CXXDefaultArgExpr::Create(Importer.getToContext(), *ToUsedLocOrErr, - *ToParamOrErr, *UsedContextOrErr); + *ToParamOrErr, RewrittenInit, + *UsedContextOrErr); } ExpectedStmt @@ -7540,16 +8231,14 @@ ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) { if (Err) return std::move(Err); - Optional<unsigned> Length; + std::optional<unsigned> Length; if (!E->isValueDependent()) Length = E->getPackLength(); SmallVector<TemplateArgument, 8> ToPartialArguments; if (E->isPartiallySubstituted()) { - if (Error Err = ImportTemplateArguments( - E->getPartialArguments().data(), - E->getPartialArguments().size(), - ToPartialArguments)) + if (Error Err = ImportTemplateArguments(E->getPartialArguments(), + ToPartialArguments)) return std::move(Err); } @@ -7615,12 +8304,14 @@ ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) { if (Error Err = ImportContainerChecked(E->arguments(), ToArgs)) return std::move(Err); - return CXXConstructExpr::Create( + CXXConstructExpr *ToE = CXXConstructExpr::Create( Importer.getToContext(), ToType, ToLocation, ToConstructor, E->isElidable(), ToArgs, E->hadMultipleCandidates(), E->isListInitialization(), E->isStdInitListInitialization(), E->requiresZeroInitialization(), E->getConstructionKind(), ToParenOrBraceRange); + ToE->setIsImmediateEscalating(E->isImmediateEscalating()); + return ToE; } ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) { @@ -7663,8 +8354,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) { if (!ToLocationOrErr) return ToLocationOrErr.takeError(); - return new (Importer.getToContext()) CXXThisExpr( - *ToLocationOrErr, *ToTypeOrErr, E->isImplicit()); + return CXXThisExpr::Create(Importer.getToContext(), *ToLocationOrErr, + *ToTypeOrErr, E->isImplicit()); } ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { @@ -7676,8 +8367,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { if (!ToLocationOrErr) return ToLocationOrErr.takeError(); - return new (Importer.getToContext()) CXXBoolLiteralExpr( - E->getValue(), *ToTypeOrErr, *ToLocationOrErr); + return CXXBoolLiteralExpr::Create(Importer.getToContext(), E->getValue(), + *ToTypeOrErr, *ToLocationOrErr); } ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) { @@ -7728,8 +8419,8 @@ ASTNodeImporter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { return std::move(Err); PseudoDestructorTypeStorage Storage; - if (IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) { - IdentifierInfo *ToII = Importer.Import(FromII); + if (const IdentifierInfo *FromII = E->getDestroyedTypeIdentifier()) { + const IdentifierInfo *ToII = Importer.Import(FromII); ExpectedSLoc ToDestroyedTypeLocOrErr = import(E->getDestroyedTypeLoc()); if (!ToDestroyedTypeLocOrErr) return ToDestroyedTypeLocOrErr.takeError(); @@ -7839,7 +8530,7 @@ ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr( 
return CXXUnresolvedConstructExpr::Create( Importer.getToContext(), ToType, ToTypeSourceInfo, ToLParenLoc, - llvm::makeArrayRef(ToArgs), ToRParenLoc); + llvm::ArrayRef(ToArgs), ToRParenLoc, E->isListInitialization()); } ExpectedStmt @@ -7881,16 +8572,21 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) { if (!ToTemplateKeywordLocOrErr) return ToTemplateKeywordLocOrErr.takeError(); + const bool KnownDependent = + (E->getDependence() & ExprDependence::TypeValue) == + ExprDependence::TypeValue; return UnresolvedLookupExpr::Create( Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr, *ToTemplateKeywordLocOrErr, ToNameInfo, E->requiresADL(), &ToTAInfo, - ToDecls.begin(), ToDecls.end()); + ToDecls.begin(), ToDecls.end(), KnownDependent, + /*KnownInstantiationDependent=*/E->isInstantiationDependent()); } return UnresolvedLookupExpr::Create( Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr, - ToNameInfo, E->requiresADL(), E->isOverloaded(), ToDecls.begin(), - ToDecls.end()); + ToNameInfo, E->requiresADL(), ToDecls.begin(), ToDecls.end(), + /*KnownDependent=*/E->isTypeDependent(), + /*KnownInstantiationDependent=*/E->isInstantiationDependent()); } ExpectedStmt @@ -8101,8 +8797,31 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) { if (!UsedContextOrErr) return UsedContextOrErr.takeError(); - return CXXDefaultInitExpr::Create( - Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr, *UsedContextOrErr); + FieldDecl *ToField = *ToFieldOrErr; + assert(ToField->hasInClassInitializer() && + "Field should have in-class initializer if there is a default init " + "expression that uses it."); + if (!ToField->getInClassInitializer()) { + // The in-class initializer may be not yet set in "To" AST even if the + // field is already there. This must be set here to make construction of + // CXXDefaultInitExpr work. 
+ auto ToInClassInitializerOrErr = + import(E->getField()->getInClassInitializer()); + if (!ToInClassInitializerOrErr) + return ToInClassInitializerOrErr.takeError(); + ToField->setInClassInitializer(*ToInClassInitializerOrErr); + } + + Expr *RewrittenInit = nullptr; + if (E->hasRewrittenInit()) { + ExpectedExpr ExprOrErr = import(E->getRewrittenExpr()); + if (!ExprOrErr) + return ExprOrErr.takeError(); + RewrittenInit = ExprOrErr.get(); + } + + return CXXDefaultInitExpr::Create(Importer.getToContext(), *ToBeginLocOrErr, + ToField, *UsedContextOrErr, RewrittenInit); } ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) { @@ -8141,7 +8860,7 @@ ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) { ToOperatorLoc, ToRParenLoc, ToAngleBrackets); } else { llvm_unreachable("Unknown cast type"); - return make_error<ImportError>(); + return make_error<ASTImportError>(); } } @@ -8150,14 +8869,14 @@ ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr( Error Err = Error::success(); auto ToType = importChecked(Err, E->getType()); auto ToExprLoc = importChecked(Err, E->getExprLoc()); - auto ToParameter = importChecked(Err, E->getParameter()); + auto ToAssociatedDecl = importChecked(Err, E->getAssociatedDecl()); auto ToReplacement = importChecked(Err, E->getReplacement()); if (Err) return std::move(Err); return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr( - ToType, E->getValueKind(), ToExprLoc, ToParameter, - E->isReferenceParameter(), ToReplacement); + ToType, E->getValueKind(), ToExprLoc, ToReplacement, ToAssociatedDecl, + E->getIndex(), E->getPackIndex(), E->isReferenceParameter()); } ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) { @@ -8258,13 +8977,13 @@ ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager, ASTImporter::~ASTImporter() = default; -Optional<unsigned> ASTImporter::getFieldIndex(Decl *F) { +std::optional<unsigned> ASTImporter::getFieldIndex(Decl *F) { assert(F && (isa<FieldDecl>(*F) || isa<IndirectFieldDecl>(*F)) && "Try to get field index for non-field."); auto *Owner = dyn_cast<RecordDecl>(F->getDeclContext()); if (!Owner) - return None; + return std::nullopt; unsigned Index = 0; for (const auto *D : Owner->decls()) { @@ -8277,7 +8996,7 @@ Optional<unsigned> ASTImporter::getFieldIndex(Decl *F) { llvm_unreachable("Field was not found in its parent context."); - return None; + return std::nullopt; } ASTImporter::FoundDeclsTy @@ -8336,10 +9055,10 @@ ASTImporter::Import(ExprWithCleanups::CleanupObject From) { // FIXME: Handle BlockDecl when we implement importing BlockExpr in // ASTNodeImporter. - return make_error<ImportError>(ImportError::UnsupportedConstruct); + return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct); } -Expected<const Type *> ASTImporter::Import(const Type *FromT) { +ExpectedTypePtr ASTImporter::Import(const Type *FromT) { if (!FromT) return FromT; @@ -8349,7 +9068,7 @@ Expected<const Type *> ASTImporter::Import(const Type *FromT) { if (Pos != ImportedTypes.end()) return Pos->second; - // Import the type + // Import the type. 
ASTNodeImporter Importer(*this); ExpectedType ToTOrErr = Importer.Visit(FromT); if (!ToTOrErr) @@ -8365,7 +9084,7 @@ Expected<QualType> ASTImporter::Import(QualType FromT) { if (FromT.isNull()) return QualType{}; - Expected<const Type *> ToTyOrErr = Import(FromT.getTypePtr()); + ExpectedTypePtr ToTyOrErr = Import(FromT.getTypePtr()); if (!ToTyOrErr) return ToTyOrErr.takeError(); @@ -8388,67 +9107,291 @@ Expected<TypeSourceInfo *> ASTImporter::Import(TypeSourceInfo *FromTSI) { return ToContext.getTrivialTypeSourceInfo(*TOrErr, *BeginLocOrErr); } -Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) { +namespace { +// To use this object, it should be created before the new attribute is created, +// and destructed after it is created. The construction already performs the +// import of the data. +template <typename T> struct AttrArgImporter { + AttrArgImporter(const AttrArgImporter<T> &) = delete; + AttrArgImporter(AttrArgImporter<T> &&) = default; + AttrArgImporter<T> &operator=(const AttrArgImporter<T> &) = delete; + AttrArgImporter<T> &operator=(AttrArgImporter<T> &&) = default; + + AttrArgImporter(ASTNodeImporter &I, Error &Err, const T &From) + : To(I.importChecked(Err, From)) {} + + const T &value() { return To; } + +private: + T To; +}; + +// To use this object, it should be created before the new attribute is created, +// and destructed after it is created. The construction already performs the +// import of the data. The array data is accessible in a pointer form, this form +// is used by the attribute classes. This object should be created once for the +// array data to be imported (the array size is not imported, just copied). +template <typename T> struct AttrArgArrayImporter { + AttrArgArrayImporter(const AttrArgArrayImporter<T> &) = delete; + AttrArgArrayImporter(AttrArgArrayImporter<T> &&) = default; + AttrArgArrayImporter<T> &operator=(const AttrArgArrayImporter<T> &) = delete; + AttrArgArrayImporter<T> &operator=(AttrArgArrayImporter<T> &&) = default; + + AttrArgArrayImporter(ASTNodeImporter &I, Error &Err, + const llvm::iterator_range<T *> &From, + unsigned ArraySize) { + if (Err) + return; + To.reserve(ArraySize); + Err = I.ImportContainerChecked(From, To); + } + + T *value() { return To.data(); } + +private: + llvm::SmallVector<T, 2> To; +}; + +class AttrImporter { + Error Err{Error::success()}; Attr *ToAttr = nullptr; - SourceRange ToRange; - if (Error Err = importInto(ToRange, FromAttr->getRange())) - return std::move(Err); + ASTImporter &Importer; + ASTNodeImporter NImporter; + +public: + AttrImporter(ASTImporter &I) : Importer(I), NImporter(I) {} + + // Useful for accessing the imported attribute. + template <typename T> T *castAttrAs() { return cast<T>(ToAttr); } + template <typename T> const T *castAttrAs() const { return cast<T>(ToAttr); } + + // Create an "importer" for an attribute parameter. + // Result of the 'value()' of that object is to be passed to the function + // 'importAttr', in the order that is expected by the attribute class. + template <class T> AttrArgImporter<T> importArg(const T &From) { + return AttrArgImporter<T>(NImporter, Err, From); + } + + // Create an "importer" for an attribute parameter that has array type. + // Result of the 'value()' of that object is to be passed to the function + // 'importAttr', then the size of the array as next argument. 
+ template <typename T> + AttrArgArrayImporter<T> importArrayArg(const llvm::iterator_range<T *> &From, + unsigned ArraySize) { + return AttrArgArrayImporter<T>(NImporter, Err, From, ArraySize); + } + + // Create an attribute object with the specified arguments. + // The 'FromAttr' is the original (not imported) attribute, the 'ImportedArg' + // should be values that are passed to the 'Create' function of the attribute. + // (The 'Create' with 'ASTContext' first and 'AttributeCommonInfo' last is + // used here.) As much data is copied or imported from the old attribute + // as possible. The passed arguments should be already imported. + // If an import error happens, the internal error is set to it, and any + // further import attempt is ignored. + template <typename T, typename... Arg> + void importAttr(const T *FromAttr, Arg &&...ImportedArg) { + static_assert(std::is_base_of<Attr, T>::value, + "T should be subclass of Attr."); + assert(!ToAttr && "Use one AttrImporter to import one Attribute object."); + + const IdentifierInfo *ToAttrName = Importer.Import(FromAttr->getAttrName()); + const IdentifierInfo *ToScopeName = + Importer.Import(FromAttr->getScopeName()); + SourceRange ToAttrRange = + NImporter.importChecked(Err, FromAttr->getRange()); + SourceLocation ToScopeLoc = + NImporter.importChecked(Err, FromAttr->getScopeLoc()); + + if (Err) + return; + + AttributeCommonInfo ToI(ToAttrName, ToScopeName, ToAttrRange, ToScopeLoc, + FromAttr->getParsedKind(), FromAttr->getForm()); + // The "SemanticSpelling" is not needed to be passed to the constructor. + // That value is recalculated from the SpellingListIndex if needed. + ToAttr = T::Create(Importer.getToContext(), + std::forward<Arg>(ImportedArg)..., ToI); + + ToAttr->setImplicit(FromAttr->isImplicit()); + ToAttr->setPackExpansion(FromAttr->isPackExpansion()); + if (auto *ToInheritableAttr = dyn_cast<InheritableAttr>(ToAttr)) + ToInheritableAttr->setInherited(FromAttr->isInherited()); + } + + // Create a clone of the 'FromAttr' and import its source range only. + // This causes objects with invalid references to be created if the 'FromAttr' + // contains other data that should be imported. + void cloneAttr(const Attr *FromAttr) { + assert(!ToAttr && "Use one AttrImporter to import one Attribute object."); + + SourceRange ToRange = NImporter.importChecked(Err, FromAttr->getRange()); + if (Err) + return; + + ToAttr = FromAttr->clone(Importer.getToContext()); + ToAttr->setRange(ToRange); + ToAttr->setAttrName(Importer.Import(FromAttr->getAttrName())); + } + + // Get the result of the previous import attempt (can be used only once). + llvm::Expected<Attr *> getResult() && { + if (Err) + return std::move(Err); + assert(ToAttr && "Attribute should be created."); + return ToAttr; + } +}; +} // namespace + +Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) { + AttrImporter AI(*this); // FIXME: Is there some kind of AttrVisitor to use here? 
switch (FromAttr->getKind()) { case attr::Aligned: { auto *From = cast<AlignedAttr>(FromAttr); - AlignedAttr *To; - auto CreateAlign = [&](bool IsAlignmentExpr, void *Alignment) { - return AlignedAttr::Create(ToContext, IsAlignmentExpr, Alignment, ToRange, - From->getSyntax(), - From->getSemanticSpelling()); - }; - if (From->isAlignmentExpr()) { - if (auto ToEOrErr = Import(From->getAlignmentExpr())) - To = CreateAlign(true, *ToEOrErr); - else - return ToEOrErr.takeError(); - } else { - if (auto ToTOrErr = Import(From->getAlignmentType())) - To = CreateAlign(false, *ToTOrErr); - else - return ToTOrErr.takeError(); - } - To->setInherited(From->isInherited()); - To->setPackExpansion(From->isPackExpansion()); - To->setImplicit(From->isImplicit()); - ToAttr = To; + if (From->isAlignmentExpr()) + AI.importAttr(From, true, AI.importArg(From->getAlignmentExpr()).value()); + else + AI.importAttr(From, false, + AI.importArg(From->getAlignmentType()).value()); + break; + } + + case attr::AlignValue: { + auto *From = cast<AlignValueAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getAlignment()).value()); break; } + case attr::Format: { const auto *From = cast<FormatAttr>(FromAttr); - FormatAttr *To; - IdentifierInfo *ToAttrType = Import(From->getType()); - To = FormatAttr::Create(ToContext, ToAttrType, From->getFormatIdx(), - From->getFirstArg(), ToRange, From->getSyntax()); - To->setInherited(From->isInherited()); - ToAttr = To; + AI.importAttr(From, Import(From->getType()), From->getFormatIdx(), + From->getFirstArg()); break; } - default: - // FIXME: 'clone' copies every member but some of them should be imported. - // Handle other Attrs that have parameters that should be imported. - ToAttr = FromAttr->clone(ToContext); - ToAttr->setRange(ToRange); + + case attr::EnableIf: { + const auto *From = cast<EnableIfAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getCond()).value(), + From->getMessage()); + break; + } + + case attr::AssertCapability: { + const auto *From = cast<AssertCapabilityAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::AcquireCapability: { + const auto *From = cast<AcquireCapabilityAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::TryAcquireCapability: { + const auto *From = cast<TryAcquireCapabilityAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getSuccessValue()).value(), + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::ReleaseCapability: { + const auto *From = cast<ReleaseCapabilityAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::RequiresCapability: { + const auto *From = cast<RequiresCapabilityAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::GuardedBy: { + const auto *From = cast<GuardedByAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getArg()).value()); + break; + } + case attr::PtGuardedBy: { + const auto *From = cast<PtGuardedByAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getArg()).value()); + break; + } + case attr::AcquiredAfter: { + const auto *From = cast<AcquiredAfterAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), 
+ From->args_size()); + break; + } + case attr::AcquiredBefore: { + const auto *From = cast<AcquiredBeforeAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::AssertExclusiveLock: { + const auto *From = cast<AssertExclusiveLockAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::AssertSharedLock: { + const auto *From = cast<AssertSharedLockAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::ExclusiveTrylockFunction: { + const auto *From = cast<ExclusiveTrylockFunctionAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getSuccessValue()).value(), + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::SharedTrylockFunction: { + const auto *From = cast<SharedTrylockFunctionAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getSuccessValue()).value(), + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + case attr::LockReturned: { + const auto *From = cast<LockReturnedAttr>(FromAttr); + AI.importAttr(From, AI.importArg(From->getArg()).value()); + break; + } + case attr::LocksExcluded: { + const auto *From = cast<LocksExcludedAttr>(FromAttr); + AI.importAttr(From, + AI.importArrayArg(From->args(), From->args_size()).value(), + From->args_size()); + break; + } + default: { + // The default branch works for attributes that have no arguments to import. + // FIXME: Handle every attribute type that has arguments of type to import + // (most often Expr* or Decl* or type) in the switch above. + AI.cloneAttr(FromAttr); break; } - assert(ToAttr && "Attribute should be created."); - - return ToAttr; + } + + return std::move(AI).getResult(); } Decl *ASTImporter::GetAlreadyImportedOrNull(const Decl *FromD) const { - auto Pos = ImportedDecls.find(FromD); - if (Pos != ImportedDecls.end()) - return Pos->second; - else - return nullptr; + return ImportedDecls.lookup(FromD); } TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) { @@ -8458,6 +9401,19 @@ TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) { return FromDPos->second->getTranslationUnitDecl(); } +Error ASTImporter::ImportAttrs(Decl *ToD, Decl *FromD) { + if (!FromD->hasAttrs() || ToD->hasAttrs()) + return Error::success(); + for (const Attr *FromAttr : FromD->getAttrs()) { + auto ToAttrOrErr = Import(FromAttr); + if (ToAttrOrErr) + ToD->addAttr(*ToAttrOrErr); + else + return ToAttrOrErr.takeError(); + } + return Error::success(); +} + Expected<Decl *> ASTImporter::Import(Decl *FromD) { if (!FromD) return nullptr; @@ -8470,7 +9426,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { // Check whether there was a previous failed import. // If yes return the existing error. if (auto Error = getImportDeclErrorIfAny(FromD)) - return make_error<ImportError>(*Error); + return make_error<ASTImportError>(*Error); // Check whether we've already imported this declaration. Decl *ToD = GetAlreadyImportedOrNull(FromD); @@ -8478,7 +9434,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { // Already imported (possibly from another TU) and with an error. 
if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) { setImportDeclError(FromD, *Error); - return make_error<ImportError>(*Error); + return make_error<ASTImportError>(*Error); } // If FromD has some updated flags after last import, apply it. @@ -8530,9 +9486,9 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { // Error encountered for the first time. // After takeError the error is not usable any more in ToDOrErr. // Get a copy of the error object (any more simple solution for this?). - ImportError ErrOut; + ASTImportError ErrOut; handleAllErrors(ToDOrErr.takeError(), - [&ErrOut](const ImportError &E) { ErrOut = E; }); + [&ErrOut](const ASTImportError &E) { ErrOut = E; }); setImportDeclError(FromD, ErrOut); // Set the error for the mapped to Decl, which is in the "to" context. if (Pos != ImportedDecls.end()) @@ -8540,8 +9496,20 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { // Set the error for all nodes which have been created before we // recognized the error. - for (const auto &Path : SavedImportPaths[FromD]) + for (const auto &Path : SavedImportPaths[FromD]) { + // The import path contains import-dependency nodes first. + // Save the node that was imported as dependency of the current node. + Decl *PrevFromDi = FromD; for (Decl *FromDi : Path) { + // Begin and end of the path equals 'FromD', skip it. + if (FromDi == FromD) + continue; + // We should not set import error on a node and all following nodes in + // the path if child import errors are ignored. + if (ChildErrorHandlingStrategy(FromDi).ignoreChildErrorOnParent( + PrevFromDi)) + break; + PrevFromDi = FromDi; setImportDeclError(FromDi, ErrOut); //FIXME Should we remove these Decls from ImportedDecls? // Set the error for the mapped to Decl, which is in the "to" context. @@ -8551,10 +9519,11 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { // FIXME Should we remove these Decls from the LookupTable, // and from ImportedFromDecls? } + } SavedImportPaths.erase(FromD); // Do not return ToDOrErr, error was taken out of it. - return make_error<ImportError>(ErrOut); + return make_error<ASTImportError>(ErrOut); } ToD = *ToDOrErr; @@ -8566,7 +9535,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { if (!ToD) { auto Err = getImportDeclErrorIfAny(FromD); assert(Err); - return make_error<ImportError>(*Err); + return make_error<ASTImportError>(*Err); } // We could import from the current TU without error. But previously we @@ -8574,20 +9543,12 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { // ASTImporter object) and with an error. if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) { setImportDeclError(FromD, *Error); - return make_error<ImportError>(*Error); + return make_error<ASTImportError>(*Error); } - // Make sure that ImportImpl registered the imported decl. assert(ImportedDecls.count(FromD) != 0 && "Missing call to MapImported?"); - - if (FromD->hasAttrs()) - for (const Attr *FromAttr : FromD->getAttrs()) { - auto ToAttrOrErr = Import(FromAttr); - if (ToAttrOrErr) - ToD->addAttr(*ToAttrOrErr); - else - return ToAttrOrErr.takeError(); - } + if (auto Error = ImportAttrs(ToD, FromD)) + return std::move(Error); // Notify subclasses. 
Imported(FromD, ToD); @@ -8597,6 +9558,11 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) { return ToDOrErr; } +llvm::Expected<InheritedConstructor> +ASTImporter::Import(const InheritedConstructor &From) { + return ASTNodeImporter(*this).ImportInheritedConstructor(From); +} + Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) { if (!FromDC) return FromDC; @@ -8741,12 +9707,11 @@ ASTImporter::Import(NestedNameSpecifier *FromNNS) { case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: - if (Expected<QualType> TyOrErr = - Import(QualType(FromNNS->getAsType(), 0u))) { + if (ExpectedTypePtr TyOrErr = Import(FromNNS->getAsType())) { bool TSTemplate = FromNNS->getKind() == NestedNameSpecifier::TypeSpecWithTemplate; return NestedNameSpecifier::Create(ToContext, Prefix, TSTemplate, - TyOrErr->getTypePtr()); + *TyOrErr); } else { return TyOrErr.takeError(); } @@ -8845,7 +9810,7 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) { switch (From.getKind()) { case TemplateName::Template: if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl())) - return TemplateName(cast<TemplateDecl>(*ToTemplateOrErr)); + return TemplateName(cast<TemplateDecl>((*ToTemplateOrErr)->getCanonicalDecl())); else return ToTemplateOrErr.takeError(); @@ -8875,13 +9840,11 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) { auto QualifierOrErr = Import(QTN->getQualifier()); if (!QualifierOrErr) return QualifierOrErr.takeError(); - - if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl())) - return ToContext.getQualifiedTemplateName( - *QualifierOrErr, QTN->hasTemplateKeyword(), - cast<TemplateDecl>(*ToTemplateOrErr)); - else - return ToTemplateOrErr.takeError(); + auto TNOrErr = Import(QTN->getUnderlyingTemplate()); + if (!TNOrErr) + return TNOrErr.takeError(); + return ToContext.getQualifiedTemplateName( + *QualifierOrErr, QTN->hasTemplateKeyword(), *TNOrErr); } case TemplateName::DependentTemplate: { @@ -8902,33 +9865,41 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) { case TemplateName::SubstTemplateTemplateParm: { SubstTemplateTemplateParmStorage *Subst = From.getAsSubstTemplateTemplateParm(); - ExpectedDecl ParamOrErr = Import(Subst->getParameter()); - if (!ParamOrErr) - return ParamOrErr.takeError(); - auto ReplacementOrErr = Import(Subst->getReplacement()); if (!ReplacementOrErr) return ReplacementOrErr.takeError(); + auto AssociatedDeclOrErr = Import(Subst->getAssociatedDecl()); + if (!AssociatedDeclOrErr) + return AssociatedDeclOrErr.takeError(); + return ToContext.getSubstTemplateTemplateParm( - cast<TemplateTemplateParmDecl>(*ParamOrErr), *ReplacementOrErr); + *ReplacementOrErr, *AssociatedDeclOrErr, Subst->getIndex(), + Subst->getPackIndex()); } case TemplateName::SubstTemplateTemplateParmPack: { - SubstTemplateTemplateParmPackStorage *SubstPack - = From.getAsSubstTemplateTemplateParmPack(); - ExpectedDecl ParamOrErr = Import(SubstPack->getParameterPack()); - if (!ParamOrErr) - return ParamOrErr.takeError(); - + SubstTemplateTemplateParmPackStorage *SubstPack = + From.getAsSubstTemplateTemplateParmPack(); ASTNodeImporter Importer(*this); auto ArgPackOrErr = Importer.ImportTemplateArgument(SubstPack->getArgumentPack()); if (!ArgPackOrErr) return ArgPackOrErr.takeError(); + auto AssociatedDeclOrErr = Import(SubstPack->getAssociatedDecl()); + if (!AssociatedDeclOrErr) + return AssociatedDeclOrErr.takeError(); + return ToContext.getSubstTemplateTemplateParmPack( - 
cast<TemplateTemplateParmDecl>(*ParamOrErr), *ArgPackOrErr); + *ArgPackOrErr, *AssociatedDeclOrErr, SubstPack->getIndex(), + SubstPack->getFinal()); + } + case TemplateName::UsingTemplate: { + auto UsingOrError = Import(From.getAsUsingShadowDecl()); + if (!UsingOrError) + return UsingOrError.takeError(); + return TemplateName(cast<UsingShadowDecl>(*UsingOrError)); } } @@ -8979,13 +9950,13 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) { ExpectedSLoc ToExLocS = Import(FromEx.getExpansionLocStart()); if (!ToExLocS) return ToExLocS.takeError(); - unsigned TokenLen = FromSM.getFileIDSize(FromID); + unsigned ExLength = FromSM.getFileIDSize(FromID); SourceLocation MLoc; if (FromEx.isMacroArgExpansion()) { - MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, TokenLen); + MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, ExLength); } else { if (ExpectedSLoc ToExLocE = Import(FromEx.getExpansionLocEnd())) - MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, TokenLen, + MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, ExLength, FromEx.isExpansionTokenRange()); else return ToExLocE.takeError(); @@ -9027,11 +9998,11 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) { if (ToID.isInvalid() || IsBuiltin) { // FIXME: We want to re-use the existing MemoryBuffer! - llvm::Optional<llvm::MemoryBufferRef> FromBuf = + std::optional<llvm::MemoryBufferRef> FromBuf = Cache->getBufferOrNone(FromContext.getDiagnostics(), FromSM.getFileManager(), SourceLocation{}); if (!FromBuf) - return llvm::make_error<ImportError>(ImportError::Unknown); + return llvm::make_error<ASTImportError>(ASTImportError::Unknown); std::unique_ptr<llvm::MemoryBuffer> ToBuf = llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(), @@ -9107,7 +10078,7 @@ Expected<CXXCtorInitializer *> ASTImporter::Import(CXXCtorInitializer *From) { *ToExprOrErr, *RParenLocOrErr); } else { // FIXME: assert? - return make_error<ImportError>(); + return make_error<ASTImportError>(); } } @@ -9261,7 +10232,7 @@ Expected<Selector> ASTImporter::Import(Selector FromSel) { if (FromSel.isNull()) return Selector{}; - SmallVector<IdentifierInfo *, 4> Idents; + SmallVector<const IdentifierInfo *, 4> Idents; Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0))); for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I) Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I))); @@ -9381,16 +10352,14 @@ ASTNodeImporter::ImportAPValue(const APValue &FromValue) { } } else { FromElemTy = FromValue.getLValueBase().getTypeInfoType(); - QualType ImpTypeInfo = importChecked( - Err, - QualType(FromValue.getLValueBase().get<TypeInfoLValue>().getType(), - 0)); + const Type *ImpTypeInfo = importChecked( + Err, FromValue.getLValueBase().get<TypeInfoLValue>().getType()); QualType ImpType = importChecked(Err, FromValue.getLValueBase().getTypeInfoType()); if (Err) return std::move(Err); - Base = APValue::LValueBase::getTypeInfo( - TypeInfoLValue(ImpTypeInfo.getTypePtr()), ImpType); + Base = APValue::LValueBase::getTypeInfo(TypeInfoLValue(ImpTypeInfo), + ImpType); } } CharUnits Offset = FromValue.getLValueOffset(); @@ -9438,7 +10407,7 @@ Expected<DeclarationName> ASTImporter::HandleNameConflict(DeclarationName Name, unsigned NumDecls) { if (ODRHandling == ODRHandlingType::Conservative) // Report error at any name conflict. 
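// Illustration (not part of the change): if the "to" context already contains an
// incompatible 'struct S { float a; };' and we import 'struct S { int a; };',
// Conservative ODR handling reports the NameConflict error returned below, while
// Liberal handling falls through and lets the importer create a new decl with the
// same name.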
- return make_error<ImportError>(ImportError::NameConflict); + return make_error<ASTImportError>(ASTImportError::NameConflict); else // Allow to create the new Decl with the same name. return Name; @@ -9498,16 +10467,16 @@ Decl *ASTImporter::MapImported(Decl *From, Decl *To) { return To; } -llvm::Optional<ImportError> +std::optional<ASTImportError> ASTImporter::getImportDeclErrorIfAny(Decl *FromD) const { auto Pos = ImportDeclErrors.find(FromD); if (Pos != ImportDeclErrors.end()) return Pos->second; else - return Optional<ImportError>(); + return std::nullopt; } -void ASTImporter::setImportDeclError(Decl *From, ImportError Error) { +void ASTImporter::setImportDeclError(Decl *From, ASTImportError Error) { auto InsertRes = ImportDeclErrors.insert({From, Error}); (void)InsertRes; // Either we set the error for the first time, or we already had set one and diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp index b78cc0c053f6..07d39dcee258 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp @@ -14,6 +14,7 @@ #include "clang/AST/ASTImporterLookupTable.h" #include "clang/AST/Decl.h" #include "clang/AST/RecursiveASTVisitor.h" +#include "llvm/Support/FormatVariadic.h" namespace clang { @@ -66,6 +67,8 @@ struct Builder : RecursiveASTVisitor<Builder> { } else if (isa<TypedefType>(Ty)) { // We do not put friend typedefs to the lookup table because // ASTImporter does not organize typedefs into redecl chains. + } else if (isa<UsingType>(Ty)) { + // Similar to TypedefType, not putting into lookup table. } else { llvm_unreachable("Unhandled type of friend class"); } @@ -84,6 +87,18 @@ struct Builder : RecursiveASTVisitor<Builder> { ASTImporterLookupTable::ASTImporterLookupTable(TranslationUnitDecl &TU) { Builder B(*this); B.TraverseDecl(&TU); + // The VaList declaration may be created on demand only or not traversed. + // To ensure it is present and found during import, add it to the table now. + if (auto *D = + dyn_cast_or_null<NamedDecl>(TU.getASTContext().getVaListTagDecl())) { + // On some platforms (AArch64) the VaList declaration can be inside a 'std' + // namespace. This is handled specially and not visible by AST traversal. + // ASTImporter must be able to find this namespace to import the VaList + // declaration (and the namespace) correctly. 
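// Illustration (assumption about the AArch64 lowering, not text from the patch):
// the implicit declaration is roughly
//   namespace std { struct __va_list { /* ... */ }; }
// so the table needs an entry for the 'std' namespace as well as for the tag
// declaration itself, which is what the two add() calls below provide.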
+ if (auto *Ns = dyn_cast<NamespaceDecl>(D->getDeclContext())) + add(&TU, Ns); + add(D->getDeclContext(), D); + } } void ASTImporterLookupTable::add(DeclContext *DC, NamedDecl *ND) { @@ -93,10 +108,19 @@ void ASTImporterLookupTable::add(DeclContext *DC, NamedDecl *ND) { } void ASTImporterLookupTable::remove(DeclContext *DC, NamedDecl *ND) { - DeclList &Decls = LookupTable[DC][ND->getDeclName()]; + const DeclarationName Name = ND->getDeclName(); + DeclList &Decls = LookupTable[DC][Name]; bool EraseResult = Decls.remove(ND); (void)EraseResult; - assert(EraseResult == true && "Trying to remove not contained Decl"); +#ifndef NDEBUG + if (!EraseResult) { + std::string Message = + llvm::formatv("Trying to remove not contained Decl '{0}' of type {1}", + Name.getAsString(), DC->getDeclKindName()) + .str(); + llvm_unreachable(Message.c_str()); + } +#endif } void ASTImporterLookupTable::add(NamedDecl *ND) { @@ -130,6 +154,11 @@ void ASTImporterLookupTable::update(NamedDecl *ND, DeclContext *OldDC) { add(ND); } +void ASTImporterLookupTable::updateForced(NamedDecl *ND, DeclContext *OldDC) { + LookupTable[OldDC][ND->getDeclName()].remove(ND); + add(ND); +} + ASTImporterLookupTable::LookupResult ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const { auto DCI = LookupTable.find(DC->getPrimaryContext()); @@ -145,7 +174,7 @@ ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const { } bool ASTImporterLookupTable::contains(DeclContext *DC, NamedDecl *ND) const { - return 0 < lookup(DC, ND->getDeclName()).count(ND); + return lookup(DC, ND->getDeclName()).contains(ND); } void ASTImporterLookupTable::dump(DeclContext *DC) const { diff --git a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp index c4ff05ba9325..37555c324282 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp @@ -74,6 +74,7 @@ #include "clang/AST/ExprOpenMP.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/StmtObjC.h" +#include "clang/AST/StmtOpenACC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" @@ -84,13 +85,12 @@ #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/APSInt.h" -#include "llvm/ADT/None.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <cassert> +#include <optional> #include <utility> using namespace clang; @@ -100,9 +100,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, Decl *D1, Decl *D2); static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const Stmt *S1, const Stmt *S2); +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, const TemplateArgument &Arg1, const TemplateArgument &Arg2); static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const TemplateArgumentLoc &Arg1, + const TemplateArgumentLoc &Arg2); +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, NestedNameSpecifier *NNS1, NestedNameSpecifier *NNS2); static bool IsStructurallyEquivalent(const IdentifierInfo *Name1, @@ -212,6 +217,15 @@ class StmtComparer { return E1->size() == E2->size(); } + bool 
IsStmtEquivalent(const DeclRefExpr *DRE1, const DeclRefExpr *DRE2) { + const ValueDecl *Decl1 = DRE1->getDecl(); + const ValueDecl *Decl2 = DRE2->getDecl(); + if (!Decl1 || !Decl2) + return false; + return IsStructurallyEquivalent(Context, const_cast<ValueDecl *>(Decl1), + const_cast<ValueDecl *>(Decl2)); + } + bool IsStmtEquivalent(const DependentScopeDeclRefExpr *DE1, const DependentScopeDeclRefExpr *DE2) { if (!IsStructurallyEquivalent(Context, DE1->getDeclName(), @@ -238,8 +252,8 @@ class StmtComparer { const GenericSelectionExpr *E2) { for (auto Pair : zip_longest(E1->getAssocTypeSourceInfos(), E2->getAssocTypeSourceInfos())) { - Optional<TypeSourceInfo *> Child1 = std::get<0>(Pair); - Optional<TypeSourceInfo *> Child2 = std::get<1>(Pair); + std::optional<TypeSourceInfo *> Child1 = std::get<0>(Pair); + std::optional<TypeSourceInfo *> Child2 = std::get<1>(Pair); // Skip this case if there are a different number of associated types. if (!Child1 || !Child2) return false; @@ -275,6 +289,17 @@ class StmtComparer { bool IsStmtEquivalent(const Stmt *S1, const Stmt *S2) { return true; } + bool IsStmtEquivalent(const GotoStmt *S1, const GotoStmt *S2) { + LabelDecl *L1 = S1->getLabel(); + LabelDecl *L2 = S2->getLabel(); + if (!L1 || !L2) + return L1 == L2; + + IdentifierInfo *Name1 = L1->getIdentifier(); + IdentifierInfo *Name2 = L2->getIdentifier(); + return ::IsStructurallyEquivalent(Name1, Name2); + } + bool IsStmtEquivalent(const SourceLocExpr *E1, const SourceLocExpr *E2) { return E1->getIdentKind() == E2->getIdentKind(); } @@ -289,8 +314,14 @@ class StmtComparer { bool IsStmtEquivalent(const SubstNonTypeTemplateParmExpr *E1, const SubstNonTypeTemplateParmExpr *E2) { - return IsStructurallyEquivalent(Context, E1->getParameter(), - E2->getParameter()); + if (!IsStructurallyEquivalent(Context, E1->getAssociatedDecl(), + E2->getAssociatedDecl())) + return false; + if (E1->getIndex() != E2->getIndex()) + return false; + if (E1->getPackIndex() != E2->getPackIndex()) + return false; + return true; } bool IsStmtEquivalent(const SubstNonTypeTemplateParmPackExpr *E1, @@ -304,8 +335,8 @@ class StmtComparer { return false; for (auto Pair : zip_longest(E1->getArgs(), E2->getArgs())) { - Optional<TypeSourceInfo *> Child1 = std::get<0>(Pair); - Optional<TypeSourceInfo *> Child2 = std::get<1>(Pair); + std::optional<TypeSourceInfo *> Child1 = std::get<0>(Pair); + std::optional<TypeSourceInfo *> Child2 = std::get<1>(Pair); // Different number of args. 
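// Explanatory note (not from the patch): zip_longest pads the shorter range with
// std::nullopt, so a missing Child1 or Child2 here means the two expressions carry
// a different number of type arguments, e.g. comparing
//   __is_constructible(T, int)   vs.   __is_constructible(T)
// stops at this check.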
if (!Child1 || !Child2) return false; @@ -317,6 +348,15 @@ class StmtComparer { return true; } + bool IsStmtEquivalent(const CXXDependentScopeMemberExpr *E1, + const CXXDependentScopeMemberExpr *E2) { + if (!IsStructurallyEquivalent(Context, E1->getMember(), E2->getMember())) { + return false; + } + return IsStructurallyEquivalent(Context, E1->getBaseType(), + E2->getBaseType()); + } + bool IsStmtEquivalent(const UnaryExprOrTypeTraitExpr *E1, const UnaryExprOrTypeTraitExpr *E2) { if (E1->getKind() != E2->getKind()) @@ -334,6 +374,34 @@ class StmtComparer { return true; } + bool IsStmtEquivalent(const OverloadExpr *E1, const OverloadExpr *E2) { + if (!IsStructurallyEquivalent(Context, E1->getName(), E2->getName())) + return false; + + if (static_cast<bool>(E1->getQualifier()) != + static_cast<bool>(E2->getQualifier())) + return false; + if (E1->getQualifier() && + !IsStructurallyEquivalent(Context, E1->getQualifier(), + E2->getQualifier())) + return false; + + if (E1->getNumTemplateArgs() != E2->getNumTemplateArgs()) + return false; + const TemplateArgumentLoc *Args1 = E1->getTemplateArgs(); + const TemplateArgumentLoc *Args2 = E2->getTemplateArgs(); + for (unsigned int ArgI = 0, ArgN = E1->getNumTemplateArgs(); ArgI < ArgN; + ++ArgI) + if (!IsStructurallyEquivalent(Context, Args1[ArgI], Args2[ArgI])) + return false; + + return true; + } + + bool IsStmtEquivalent(const CXXBoolLiteralExpr *E1, const CXXBoolLiteralExpr *E2) { + return E1->getValue() == E2->getValue(); + } + /// End point of the traversal chain. bool TraverseStmt(const Stmt *S1, const Stmt *S2) { return true; } @@ -381,12 +449,67 @@ public: }; } // namespace +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const UnaryOperator *E1, + const CXXOperatorCallExpr *E2) { + return UnaryOperator::getOverloadedOperator(E1->getOpcode()) == + E2->getOperator() && + IsStructurallyEquivalent(Context, E1->getSubExpr(), E2->getArg(0)); +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const CXXOperatorCallExpr *E1, + const UnaryOperator *E2) { + return E1->getOperator() == + UnaryOperator::getOverloadedOperator(E2->getOpcode()) && + IsStructurallyEquivalent(Context, E1->getArg(0), E2->getSubExpr()); +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const BinaryOperator *E1, + const CXXOperatorCallExpr *E2) { + return BinaryOperator::getOverloadedOperator(E1->getOpcode()) == + E2->getOperator() && + IsStructurallyEquivalent(Context, E1->getLHS(), E2->getArg(0)) && + IsStructurallyEquivalent(Context, E1->getRHS(), E2->getArg(1)); +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const CXXOperatorCallExpr *E1, + const BinaryOperator *E2) { + return E1->getOperator() == + BinaryOperator::getOverloadedOperator(E2->getOpcode()) && + IsStructurallyEquivalent(Context, E1->getArg(0), E2->getLHS()) && + IsStructurallyEquivalent(Context, E1->getArg(1), E2->getRHS()); +} + /// Determine structural equivalence of two statements. static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, const Stmt *S1, const Stmt *S2) { if (!S1 || !S2) return S1 == S2; + // Check for statements with similar syntax but different AST. + // A UnaryOperator node is more lightweight than a CXXOperatorCallExpr node. + // The more heavyweight node is only created if the definition-time name + // lookup had any results. The lookup results are stored CXXOperatorCallExpr + // only. 
The lookup results can be different in a "From" and "To" AST even if + // the compared structure is otherwise equivalent. For this reason we must + // treat a similar unary/binary operator node and CXXOperatorCall node as + // equivalent. + if (const auto *E2CXXOperatorCall = dyn_cast<CXXOperatorCallExpr>(S2)) { + if (const auto *E1Unary = dyn_cast<UnaryOperator>(S1)) + return IsStructurallyEquivalent(Context, E1Unary, E2CXXOperatorCall); + if (const auto *E1Binary = dyn_cast<BinaryOperator>(S1)) + return IsStructurallyEquivalent(Context, E1Binary, E2CXXOperatorCall); + } + if (const auto *E1CXXOperatorCall = dyn_cast<CXXOperatorCallExpr>(S1)) { + if (const auto *E2Unary = dyn_cast<UnaryOperator>(S2)) + return IsStructurallyEquivalent(Context, E1CXXOperatorCall, E2Unary); + if (const auto *E2Binary = dyn_cast<BinaryOperator>(S2)) + return IsStructurallyEquivalent(Context, E1CXXOperatorCall, E2Binary); + } + // Compare the statements itself. StmtComparer Comparer(Context); if (!Comparer.IsEquivalent(S1, S2)) @@ -394,8 +517,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, // Iterate over the children of both statements and also compare them. for (auto Pair : zip_longest(S1->children(), S2->children())) { - Optional<const Stmt *> Child1 = std::get<0>(Pair); - Optional<const Stmt *> Child2 = std::get<1>(Pair); + std::optional<const Stmt *> Child1 = std::get<0>(Pair); + std::optional<const Stmt *> Child2 = std::get<1>(Pair); // One of the statements has a different amount of children than the other, // so the statements can't be equivalent. if (!Child1 || !Child2) @@ -510,13 +633,15 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, *P2 = N2.getAsSubstTemplateTemplateParmPack(); return IsStructurallyEquivalent(Context, P1->getArgumentPack(), P2->getArgumentPack()) && - IsStructurallyEquivalent(Context, P1->getParameterPack(), - P2->getParameterPack()); + IsStructurallyEquivalent(Context, P1->getAssociatedDecl(), + P2->getAssociatedDecl()) && + P1->getIndex() == P2->getIndex(); } case TemplateName::Template: case TemplateName::QualifiedTemplate: case TemplateName::SubstTemplateTemplateParm: + case TemplateName::UsingTemplate: // It is sufficient to check value of getAsTemplateDecl. break; @@ -525,6 +650,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return true; } +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + ArrayRef<TemplateArgument> Args1, + ArrayRef<TemplateArgument> Args2); + /// Determine whether two template arguments are equivalent. static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, const TemplateArgument &Arg1, @@ -566,19 +695,36 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return IsStructurallyEquivalent(Context, Arg1.getAsExpr(), Arg2.getAsExpr()); + case TemplateArgument::StructuralValue: + return Arg1.structurallyEquals(Arg2); + case TemplateArgument::Pack: - if (Arg1.pack_size() != Arg2.pack_size()) - return false; + return IsStructurallyEquivalent(Context, Arg1.pack_elements(), + Arg2.pack_elements()); + } - for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I) - if (!IsStructurallyEquivalent(Context, Arg1.pack_begin()[I], - Arg2.pack_begin()[I])) - return false; + llvm_unreachable("Invalid template argument kind"); +} - return true; +/// Determine structural equivalence of two template argument lists. 
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + ArrayRef<TemplateArgument> Args1, + ArrayRef<TemplateArgument> Args2) { + if (Args1.size() != Args2.size()) + return false; + for (unsigned I = 0, N = Args1.size(); I != N; ++I) { + if (!IsStructurallyEquivalent(Context, Args1[I], Args2[I])) + return false; } + return true; +} - llvm_unreachable("Invalid template argument kind"); +/// Determine whether two template argument locations are equivalent. +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + const TemplateArgumentLoc &Arg1, + const TemplateArgumentLoc &Arg2) { + return IsStructurallyEquivalent(Context, Arg1.getArgument(), + Arg2.getArgument()); } /// Determine structural equivalence for the common part of array @@ -703,6 +849,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, case Type::Adjusted: case Type::Decayed: + case Type::ArrayParameter: if (!IsStructurallyEquivalent(Context, cast<AdjustedType>(T1)->getOriginalType(), cast<AdjustedType>(T2)->getOriginalType())) @@ -899,7 +1046,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return false; // Fall through to check the bits common with FunctionNoProtoType. - LLVM_FALLTHROUGH; + [[fallthrough]]; } case Type::FunctionNoProto: { @@ -932,6 +1079,20 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return false; break; + case Type::CountAttributed: + if (!IsStructurallyEquivalent(Context, + cast<CountAttributedType>(T1)->desugar(), + cast<CountAttributedType>(T2)->desugar())) + return false; + break; + + case Type::BTFTagAttributed: + if (!IsStructurallyEquivalent( + Context, cast<BTFTagAttributedType>(T1)->getWrappedType(), + cast<BTFTagAttributedType>(T2)->getWrappedType())) + return false; + break; + case Type::Paren: if (!IsStructurallyEquivalent(Context, cast<ParenType>(T1)->getInnerType(), cast<ParenType>(T2)->getInnerType())) @@ -945,9 +1106,21 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return false; break; + case Type::Using: + if (!IsStructurallyEquivalent(Context, cast<UsingType>(T1)->getFoundDecl(), + cast<UsingType>(T2)->getFoundDecl())) + return false; + if (!IsStructurallyEquivalent(Context, + cast<UsingType>(T1)->getUnderlyingType(), + cast<UsingType>(T2)->getUnderlyingType())) + return false; + break; + case Type::Typedef: if (!IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->getDecl(), - cast<TypedefType>(T2)->getDecl())) + cast<TypedefType>(T2)->getDecl()) || + !IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->desugar(), + cast<TypedefType>(T2)->desugar())) return false; break; @@ -960,8 +1133,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, case Type::TypeOf: if (!IsStructurallyEquivalent(Context, - cast<TypeOfType>(T1)->getUnderlyingType(), - cast<TypeOfType>(T2)->getUnderlyingType())) + cast<TypeOfType>(T1)->getUnmodifiedType(), + cast<TypeOfType>(T2)->getUnmodifiedType())) return false; break; @@ -991,16 +1164,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, if (Auto1->getTypeConstraintConcept() != Auto2->getTypeConstraintConcept()) return false; - ArrayRef<TemplateArgument> Auto1Args = - Auto1->getTypeConstraintArguments(); - ArrayRef<TemplateArgument> Auto2Args = - Auto2->getTypeConstraintArguments(); - if (Auto1Args.size() != Auto2Args.size()) + if (!IsStructurallyEquivalent(Context, + Auto1->getTypeConstraintArguments(), + 
Auto2->getTypeConstraintArguments())) return false; - for (unsigned I = 0, N = Auto1Args.size(); I != N; ++I) { - if (!IsStructurallyEquivalent(Context, Auto1Args[I], Auto2Args[I])) - return false; - } } break; } @@ -1027,7 +1194,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, case Type::TemplateTypeParm: { const auto *Parm1 = cast<TemplateTypeParmType>(T1); const auto *Parm2 = cast<TemplateTypeParmType>(T2); - if (Parm1->getDepth() != Parm2->getDepth()) + if (!Context.IgnoreTemplateParmDepth && + Parm1->getDepth() != Parm2->getDepth()) return false; if (Parm1->getIndex() != Parm2->getIndex()) return false; @@ -1041,22 +1209,26 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, case Type::SubstTemplateTypeParm: { const auto *Subst1 = cast<SubstTemplateTypeParmType>(T1); const auto *Subst2 = cast<SubstTemplateTypeParmType>(T2); - if (!IsStructurallyEquivalent(Context, - QualType(Subst1->getReplacedParameter(), 0), - QualType(Subst2->getReplacedParameter(), 0))) - return false; if (!IsStructurallyEquivalent(Context, Subst1->getReplacementType(), Subst2->getReplacementType())) return false; + if (!IsStructurallyEquivalent(Context, Subst1->getAssociatedDecl(), + Subst2->getAssociatedDecl())) + return false; + if (Subst1->getIndex() != Subst2->getIndex()) + return false; + if (Subst1->getPackIndex() != Subst2->getPackIndex()) + return false; break; } case Type::SubstTemplateTypeParmPack: { const auto *Subst1 = cast<SubstTemplateTypeParmPackType>(T1); const auto *Subst2 = cast<SubstTemplateTypeParmPackType>(T2); - if (!IsStructurallyEquivalent(Context, - QualType(Subst1->getReplacedParameter(), 0), - QualType(Subst2->getReplacedParameter(), 0))) + if (!IsStructurallyEquivalent(Context, Subst1->getAssociatedDecl(), + Subst2->getAssociatedDecl())) + return false; + if (Subst1->getIndex() != Subst2->getIndex()) return false; if (!IsStructurallyEquivalent(Context, Subst1->getArgumentPack(), Subst2->getArgumentPack())) @@ -1070,20 +1242,18 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, if (!IsStructurallyEquivalent(Context, Spec1->getTemplateName(), Spec2->getTemplateName())) return false; - if (Spec1->getNumArgs() != Spec2->getNumArgs()) + if (!IsStructurallyEquivalent(Context, Spec1->template_arguments(), + Spec2->template_arguments())) return false; - for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) { - if (!IsStructurallyEquivalent(Context, Spec1->getArg(I), - Spec2->getArg(I))) - return false; - } break; } case Type::Elaborated: { const auto *Elab1 = cast<ElaboratedType>(T1); const auto *Elab2 = cast<ElaboratedType>(T2); - // CHECKME: what if a keyword is ETK_None or ETK_typename ? + // CHECKME: what if a keyword is ElaboratedTypeKeyword::None or + // ElaboratedTypeKeyword::Typename + // ? 
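// Illustration of the CHECKME above (assumption): a type written as 'struct S'
// carries ElaboratedTypeKeyword::Struct while a plain 'S' carries
// ElaboratedTypeKeyword::None, so the strict keyword comparison below treats two
// spellings of the same type as non-equivalent.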
if (Elab1->getKeyword() != Elab2->getKeyword()) return false; if (!IsStructurallyEquivalent(Context, Elab1->getQualifier(), @@ -1127,13 +1297,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, if (!IsStructurallyEquivalent(Spec1->getIdentifier(), Spec2->getIdentifier())) return false; - if (Spec1->getNumArgs() != Spec2->getNumArgs()) + if (!IsStructurallyEquivalent(Context, Spec1->template_arguments(), + Spec2->template_arguments())) return false; - for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) { - if (!IsStructurallyEquivalent(Context, Spec1->getArg(I), - Spec2->getArg(I))) - return false; - } break; } @@ -1144,6 +1310,16 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return false; break; + case Type::PackIndexing: + if (!IsStructurallyEquivalent(Context, + cast<PackIndexingType>(T1)->getPattern(), + cast<PackIndexingType>(T2)->getPattern())) + if (!IsStructurallyEquivalent(Context, + cast<PackIndexingType>(T1)->getIndexExpr(), + cast<PackIndexingType>(T2)->getIndexExpr())) + return false; + break; + case Type::ObjCInterface: { const auto *Iface1 = cast<ObjCInterfaceType>(T1); const auto *Iface2 = cast<ObjCInterfaceType>(T2); @@ -1205,33 +1381,56 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, cast<PipeType>(T2)->getElementType())) return false; break; - case Type::ExtInt: { - const auto *Int1 = cast<ExtIntType>(T1); - const auto *Int2 = cast<ExtIntType>(T2); + case Type::BitInt: { + const auto *Int1 = cast<BitIntType>(T1); + const auto *Int2 = cast<BitIntType>(T2); if (Int1->isUnsigned() != Int2->isUnsigned() || Int1->getNumBits() != Int2->getNumBits()) return false; break; } - case Type::DependentExtInt: { - const auto *Int1 = cast<DependentExtIntType>(T1); - const auto *Int2 = cast<DependentExtIntType>(T2); + case Type::DependentBitInt: { + const auto *Int1 = cast<DependentBitIntType>(T1); + const auto *Int2 = cast<DependentBitIntType>(T2); if (Int1->isUnsigned() != Int2->isUnsigned() || !IsStructurallyEquivalent(Context, Int1->getNumBitsExpr(), Int2->getNumBitsExpr())) return false; + break; } } // end switch return true; } -/// Determine structural equivalence of two fields. static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, - FieldDecl *Field1, FieldDecl *Field2) { - const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext()); + VarDecl *D1, VarDecl *D2) { + IdentifierInfo *Name1 = D1->getIdentifier(); + IdentifierInfo *Name2 = D2->getIdentifier(); + if (!::IsStructurallyEquivalent(Name1, Name2)) + return false; + + if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType())) + return false; + + // Compare storage class and initializer only if none or both are a + // definition. Like a forward-declaration matches a class definition, variable + // declarations that are not definitions should match with the definitions. 
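// Example (illustration only): 'extern int g;' in one TU should still match
// 'int g = 1;' in the other, so when exactly one side is a definition the pair is
// accepted here and the storage-class and initializer checks are skipped.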
+ if (D1->isThisDeclarationADefinition() != D2->isThisDeclarationADefinition()) + return true; + + if (D1->getStorageClass() != D2->getStorageClass()) + return false; + + return IsStructurallyEquivalent(Context, D1->getInit(), D2->getInit()); +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + FieldDecl *Field1, FieldDecl *Field2, + QualType Owner2Type) { + const auto *Owner2 = cast<Decl>(Field2->getDeclContext()); // For anonymous structs/unions, match up the anonymous struct/union type // declarations directly, so that we don't go off searching for anonymous @@ -1251,7 +1450,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, Context.Diag2( Owner2->getLocation(), Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent)) - << Context.ToCtx.getTypeDeclType(Owner2); + << Owner2Type; Context.Diag2(Field2->getLocation(), diag::note_odr_field_name) << Field2->getDeclName(); Context.Diag1(Field1->getLocation(), diag::note_odr_field_name) @@ -1266,7 +1465,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, Context.Diag2( Owner2->getLocation(), Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent)) - << Context.ToCtx.getTypeDeclType(Owner2); + << Owner2Type; Context.Diag2(Field2->getLocation(), diag::note_odr_field) << Field2->getDeclName() << Field2->getType(); Context.Diag1(Field1->getLocation(), diag::note_odr_field) @@ -1282,6 +1481,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return true; } +/// Determine structural equivalence of two fields. +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + FieldDecl *Field1, FieldDecl *Field2) { + const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext()); + return IsStructurallyEquivalent(Context, Field1, Field2, + Context.ToCtx.getTypeDeclType(Owner2)); +} + /// Determine structural equivalence of two methods. static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, CXXMethodDecl *Method1, @@ -1292,10 +1499,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, Method1->getAccess() == Method2->getAccess() && Method1->getOverloadedOperator() == Method2->getOverloadedOperator() && Method1->isStatic() == Method2->isStatic() && + Method1->isImplicitObjectMemberFunction() == + Method2->isImplicitObjectMemberFunction() && Method1->isConst() == Method2->isConst() && Method1->isVolatile() == Method2->isVolatile() && Method1->isVirtual() == Method2->isVirtual() && - Method1->isPure() == Method2->isPure() && + Method1->isPureVirtual() == Method2->isPureVirtual() && Method1->isDefaulted() == Method2->isDefaulted() && Method1->isDeleted() == Method2->isDeleted(); if (!PropertiesEqual) @@ -1347,19 +1556,66 @@ IsStructurallyEquivalentLambdas(StructuralEquivalenceContext &Context, return true; } +/// Determine if context of a class is equivalent. +static bool +IsRecordContextStructurallyEquivalent(StructuralEquivalenceContext &Context, + RecordDecl *D1, RecordDecl *D2) { + // The context should be completely equal, including anonymous and inline + // namespaces. + // We compare objects as part of full translation units, not subtrees of + // translation units. 
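// Illustrative consequences (assumptions, not from the patch):
//   namespace a { struct X {}; }  vs.  namespace b { struct X {}; }
//     -> enclosing contexts differ, so the records are not equivalent;
//   void f() { struct Y {}; }     vs.  struct Y {};  // at file scope
//     -> accepted by the function-vs-translation-unit special case below.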
+ DeclContext *DC1 = D1->getDeclContext()->getNonTransparentContext(); + DeclContext *DC2 = D2->getDeclContext()->getNonTransparentContext(); + while (true) { + // Special case: We allow a struct defined in a function to be equivalent + // with a similar struct defined outside of a function. + if ((DC1->isFunctionOrMethod() && DC2->isTranslationUnit()) || + (DC2->isFunctionOrMethod() && DC1->isTranslationUnit())) + return true; + + if (DC1->getDeclKind() != DC2->getDeclKind()) + return false; + if (DC1->isTranslationUnit()) + break; + if (DC1->isInlineNamespace() != DC2->isInlineNamespace()) + return false; + if (const auto *ND1 = dyn_cast<NamedDecl>(DC1)) { + const auto *ND2 = cast<NamedDecl>(DC2); + if (!DC1->isInlineNamespace() && + !IsStructurallyEquivalent(ND1->getIdentifier(), ND2->getIdentifier())) + return false; + } + + if (auto *D1Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC1)) { + auto *D2Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC2); + if (!IsStructurallyEquivalent(Context, D1Spec, D2Spec)) + return false; + } + + DC1 = DC1->getParent()->getNonTransparentContext(); + DC2 = DC2->getParent()->getNonTransparentContext(); + } + + return true; +} + +static bool NameIsStructurallyEquivalent(const TagDecl &D1, const TagDecl &D2) { + auto GetName = [](const TagDecl &D) -> const IdentifierInfo * { + if (const IdentifierInfo *Name = D.getIdentifier()) + return Name; + if (const TypedefNameDecl *TypedefName = D.getTypedefNameForAnonDecl()) + return TypedefName->getIdentifier(); + return nullptr; + }; + return IsStructurallyEquivalent(GetName(D1), GetName(D2)); +} + /// Determine structural equivalence of two records. static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, RecordDecl *D1, RecordDecl *D2) { - - // Check for equivalent structure names. - IdentifierInfo *Name1 = D1->getIdentifier(); - if (!Name1 && D1->getTypedefNameForAnonDecl()) - Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier(); - IdentifierInfo *Name2 = D2->getIdentifier(); - if (!Name2 && D2->getTypedefNameForAnonDecl()) - Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier(); - if (!IsStructurallyEquivalent(Name1, Name2)) + if (!NameIsStructurallyEquivalent(*D1, *D2)) { return false; + } if (D1->isUnion() != D2->isUnion()) { if (Context.Complain) { @@ -1375,9 +1631,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, if (!D1->getDeclName() && !D2->getDeclName()) { // If both anonymous structs/unions are in a record context, make sure // they occur in the same location in the context records. - if (Optional<unsigned> Index1 = + if (std::optional<unsigned> Index1 = StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(D1)) { - if (Optional<unsigned> Index2 = + if (std::optional<unsigned> Index2 = StructuralEquivalenceContext::findUntaggedStructOrUnionIndex( D2)) { if (*Index1 != *Index2) @@ -1386,6 +1642,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, } } + // If the records occur in different context (namespace), these should be + // different. This is specially important if the definition of one or both + // records is missing. + if (!IsRecordContextStructurallyEquivalent(Context, D1, D2)) + return false; + // If both declarations are class template specializations, we know // the ODR applies, so check the template and template arguments. 
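// For example (illustration only): S<int> and S<long> are specializations of the
// same ClassTemplateDecl but differ in their template arguments, so the comparison
// below must reject them.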
const auto *Spec1 = dyn_cast<ClassTemplateSpecializationDecl>(D1); @@ -1554,6 +1816,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, } // Check the fields for consistency. + QualType D2Type = Context.ToCtx.getTypeDeclType(D2); RecordDecl::field_iterator Field2 = D2->field_begin(), Field2End = D2->field_end(); for (RecordDecl::field_iterator Field1 = D1->field_begin(), @@ -1572,7 +1835,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return false; } - if (!IsStructurallyEquivalent(Context, *Field1, *Field2)) + if (!IsStructurallyEquivalent(Context, *Field1, *Field2, D2Type)) return false; } @@ -1591,19 +1854,32 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return true; } +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + EnumConstantDecl *D1, + EnumConstantDecl *D2) { + const llvm::APSInt &FromVal = D1->getInitVal(); + const llvm::APSInt &ToVal = D2->getInitVal(); + if (FromVal.isSigned() != ToVal.isSigned()) + return false; + if (FromVal.getBitWidth() != ToVal.getBitWidth()) + return false; + if (FromVal != ToVal) + return false; + + if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier())) + return false; + + // Init expressions are the most expensive check, so do them last. + return IsStructurallyEquivalent(Context, D1->getInitExpr(), + D2->getInitExpr()); +} + /// Determine structural equivalence of two enums. static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, EnumDecl *D1, EnumDecl *D2) { - - // Check for equivalent enum names. - IdentifierInfo *Name1 = D1->getIdentifier(); - if (!Name1 && D1->getTypedefNameForAnonDecl()) - Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier(); - IdentifierInfo *Name2 = D2->getIdentifier(); - if (!Name2 && D2->getTypedefNameForAnonDecl()) - Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier(); - if (!IsStructurallyEquivalent(Name1, Name2)) + if (!NameIsStructurallyEquivalent(*D1, *D2)) { return false; + } // Compare the definitions of these two enums. If either or both are // incomplete (i.e. forward declared), we assume that they are equivalent. @@ -1730,7 +2006,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, } return false; } - + if (!Context.IgnoreTemplateParmDepth && D1->getDepth() != D2->getDepth()) + return false; + if (D1->getIndex() != D2->getIndex()) + return false; // Check types. if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType())) { if (Context.Complain) { @@ -1803,6 +2082,18 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, } static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + TypeAliasTemplateDecl *D1, + TypeAliasTemplateDecl *D2) { + // Check template parameters. + if (!IsTemplateDeclCommonStructurallyEquivalent(Context, D1, D2)) + return false; + + // Check the templated declaration. + return IsStructurallyEquivalent(Context, D1->getTemplatedDecl(), + D2->getTemplatedDecl()); +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, ConceptDecl *D1, ConceptDecl *D2) { // Check template parameters. 
@@ -1858,6 +2149,132 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return true; } +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + ObjCIvarDecl *D1, ObjCIvarDecl *D2, + QualType Owner2Type) { + if (D1->getAccessControl() != D2->getAccessControl()) + return false; + + return IsStructurallyEquivalent(Context, cast<FieldDecl>(D1), + cast<FieldDecl>(D2), Owner2Type); +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + ObjCIvarDecl *D1, ObjCIvarDecl *D2) { + QualType Owner2Type = + Context.ToCtx.getObjCInterfaceType(D2->getContainingInterface()); + return IsStructurallyEquivalent(Context, D1, D2, Owner2Type); +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + ObjCMethodDecl *Method1, + ObjCMethodDecl *Method2) { + bool PropertiesEqual = + Method1->isInstanceMethod() == Method2->isInstanceMethod() && + Method1->isVariadic() == Method2->isVariadic() && + Method1->isDirectMethod() == Method2->isDirectMethod(); + if (!PropertiesEqual) + return false; + + // Compare selector slot names. + Selector Selector1 = Method1->getSelector(), + Selector2 = Method2->getSelector(); + unsigned NumArgs = Selector1.getNumArgs(); + if (NumArgs != Selector2.getNumArgs()) + return false; + // Compare all selector slots. For selectors with arguments it means all arg + // slots. And if there are no arguments, compare the first-and-only slot. + unsigned SlotsToCheck = NumArgs > 0 ? NumArgs : 1; + for (unsigned I = 0; I < SlotsToCheck; ++I) { + if (!IsStructurallyEquivalent(Selector1.getIdentifierInfoForSlot(I), + Selector2.getIdentifierInfoForSlot(I))) + return false; + } + + // Compare types. + if (!IsStructurallyEquivalent(Context, Method1->getReturnType(), + Method2->getReturnType())) + return false; + assert( + Method1->param_size() == Method2->param_size() && + "Same number of arguments should be already enforced in Selector checks"); + for (ObjCMethodDecl::param_type_iterator + ParamT1 = Method1->param_type_begin(), + ParamT1End = Method1->param_type_end(), + ParamT2 = Method2->param_type_begin(), + ParamT2End = Method2->param_type_end(); + (ParamT1 != ParamT1End) && (ParamT2 != ParamT2End); + ++ParamT1, ++ParamT2) { + if (!IsStructurallyEquivalent(Context, *ParamT1, *ParamT2)) + return false; + } + + return true; +} + +static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, + ObjCCategoryDecl *D1, + ObjCCategoryDecl *D2) { + if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier())) + return false; + + const ObjCInterfaceDecl *Intf1 = D1->getClassInterface(), + *Intf2 = D2->getClassInterface(); + if ((!Intf1 || !Intf2) && (Intf1 != Intf2)) + return false; + + if (Intf1 && + !IsStructurallyEquivalent(Intf1->getIdentifier(), Intf2->getIdentifier())) + return false; + + // Compare protocols. + ObjCCategoryDecl::protocol_iterator Protocol2 = D2->protocol_begin(), + Protocol2End = D2->protocol_end(); + for (ObjCCategoryDecl::protocol_iterator Protocol1 = D1->protocol_begin(), + Protocol1End = D1->protocol_end(); + Protocol1 != Protocol1End; ++Protocol1, ++Protocol2) { + if (Protocol2 == Protocol2End) + return false; + if (!IsStructurallyEquivalent((*Protocol1)->getIdentifier(), + (*Protocol2)->getIdentifier())) + return false; + } + if (Protocol2 != Protocol2End) + return false; + + // Compare ivars. + QualType D2Type = + Intf2 ? 
Context.ToCtx.getObjCInterfaceType(Intf2) : QualType(); + ObjCCategoryDecl::ivar_iterator Ivar2 = D2->ivar_begin(), + Ivar2End = D2->ivar_end(); + for (ObjCCategoryDecl::ivar_iterator Ivar1 = D1->ivar_begin(), + Ivar1End = D1->ivar_end(); + Ivar1 != Ivar1End; ++Ivar1, ++Ivar2) { + if (Ivar2 == Ivar2End) + return false; + if (!IsStructurallyEquivalent(Context, *Ivar1, *Ivar2, D2Type)) + return false; + } + if (Ivar2 != Ivar2End) + return false; + + // Compare methods. + ObjCCategoryDecl::method_iterator Method2 = D2->meth_begin(), + Method2End = D2->meth_end(); + for (ObjCCategoryDecl::method_iterator Method1 = D1->meth_begin(), + Method1End = D1->meth_end(); + Method1 != Method1End; ++Method1, ++Method2) { + if (Method2 == Method2End) + return false; + if (!IsStructurallyEquivalent(Context, *Method1, *Method2)) + return false; + } + if (Method2 != Method2End) + return false; + + return true; +} + /// Determine structural equivalence of two declarations. static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, Decl *D1, Decl *D2) { @@ -1902,14 +2319,14 @@ DiagnosticBuilder StructuralEquivalenceContext::Diag2(SourceLocation Loc, return ToCtx.getDiagnostics().Report(Loc, DiagID); } -Optional<unsigned> +std::optional<unsigned> StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) { ASTContext &Context = Anon->getASTContext(); QualType AnonTy = Context.getRecordType(Anon); const auto *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext()); if (!Owner) - return None; + return std::nullopt; unsigned Index = 0; for (const auto *D : Owner->noload_decls()) { diff --git a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp index 4a033bf50bd4..99916f523aa9 100644 --- a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp +++ b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp @@ -13,10 +13,14 @@ //===----------------------------------------------------------------------===// #include "clang/AST/ASTTypeTraits.h" +#include "clang/AST/ASTConcept.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" +#include "clang/AST/TypeLoc.h" using namespace clang; @@ -24,9 +28,12 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = { {NKI_None, "<None>"}, {NKI_None, "TemplateArgument"}, {NKI_None, "TemplateArgumentLoc"}, + {NKI_None, "LambdaCapture"}, {NKI_None, "TemplateName"}, {NKI_None, "NestedNameSpecifierLoc"}, {NKI_None, "QualType"}, +#define TYPELOC(CLASS, PARENT) {NKI_##PARENT, #CLASS "TypeLoc"}, +#include "clang/AST/TypeLocNodes.def" {NKI_None, "TypeLoc"}, {NKI_None, "CXXBaseSpecifier"}, {NKI_None, "CXXCtorInitializer"}, @@ -44,12 +51,30 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = { #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) {NKI_OMPClause, #Class}, #include "llvm/Frontend/OpenMP/OMP.inc" + {NKI_None, "Attr"}, +#define ATTR(A) {NKI_Attr, #A "Attr"}, +#include "clang/Basic/AttrList.inc" + {NKI_None, "ObjCProtocolLoc"}, + {NKI_None, "ConceptReference"}, }; +bool ASTNodeKind::isBaseOf(ASTNodeKind Other) const { + return isBaseOf(KindId, Other.KindId); +} + bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const { return isBaseOf(KindId, Other.KindId, Distance); } +bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived) { + if (Base == NKI_None || Derived == NKI_None) + return false; + 
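// Explanatory note (not in the patch): the loop walks AllKindInfo's ParentId chain
// from Derived toward the root, so Base counts as a base kind whenever the walk
// reaches it, e.g. NKI_Stmt for an NKI_IfStmt query.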
while (Derived != Base && Derived != NKI_None) { + Derived = AllKindInfo[Derived].ParentId; + } + return Derived == Base; +} + bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived, unsigned *Distance) { if (Base == NKI_None || Derived == NKI_None) return false; @@ -86,7 +111,7 @@ ASTNodeKind ASTNodeKind::getMostDerivedType(ASTNodeKind Kind1, ASTNodeKind ASTNodeKind::getMostDerivedCommonAncestor(ASTNodeKind Kind1, ASTNodeKind Kind2) { NodeKindId Parent = Kind1.KindId; - while (!isBaseOf(Parent, Kind2.KindId, nullptr) && Parent != NKI_None) { + while (!isBaseOf(Parent, Kind2.KindId) && Parent != NKI_None) { Parent = AllKindInfo[Parent].ParentId; } return ASTNodeKind(Parent); @@ -123,6 +148,17 @@ ASTNodeKind ASTNodeKind::getFromNode(const Type &T) { llvm_unreachable("invalid type kind"); } + ASTNodeKind ASTNodeKind::getFromNode(const TypeLoc &T) { + switch (T.getTypeLocClass()) { +#define ABSTRACT_TYPELOC(CLASS, PARENT) +#define TYPELOC(CLASS, PARENT) \ + case TypeLoc::CLASS: \ + return ASTNodeKind(NKI_##CLASS##TypeLoc); +#include "clang/AST/TypeLocNodes.def" + } + llvm_unreachable("invalid typeloc kind"); + } + ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) { switch (C.getClauseKind()) { #define GEN_CLANG_CLAUSE_CLASS @@ -134,7 +170,17 @@ ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) { llvm_unreachable("unexpected OpenMP clause kind"); #include "llvm/Frontend/OpenMP/OMP.inc" } - llvm_unreachable("invalid stmt kind"); + llvm_unreachable("invalid omp clause kind"); +} + +ASTNodeKind ASTNodeKind::getFromNode(const Attr &A) { + switch (A.getKind()) { +#define ATTR(A) \ + case attr::A: \ + return ASTNodeKind(NKI_##A##Attr); +#include "clang/Basic/AttrList.inc" + } + llvm_unreachable("invalid attr kind"); } void DynTypedNode::print(llvm::raw_ostream &OS, @@ -162,6 +208,12 @@ void DynTypedNode::print(llvm::raw_ostream &OS, S->printPretty(OS, nullptr, PP); else if (const Type *T = get<Type>()) QualType(T, 0).print(OS, PP); + else if (const Attr *A = get<Attr>()) + A->printPretty(OS, PP); + else if (const ObjCProtocolLoc *P = get<ObjCProtocolLoc>()) + P->getProtocol()->print(OS, PP); + else if (const ConceptReference *C = get<ConceptReference>()) + C->print(OS, PP); else OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n"; } @@ -174,6 +226,10 @@ void DynTypedNode::dump(llvm::raw_ostream &OS, S->dump(OS, Context); else if (const Type *T = get<Type>()) T->dump(OS, Context); + else if (const ConceptReference *C = get<ConceptReference>()) + C->dump(OS); + else if (const TypeLoc *TL = get<TypeLoc>()) + TL->dump(OS, Context); else OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n"; } @@ -195,5 +251,11 @@ SourceRange DynTypedNode::getSourceRange() const { return SourceRange(C->getBeginLoc(), C->getEndLoc()); if (const auto *CBS = get<CXXBaseSpecifier>()) return CBS->getSourceRange(); + if (const auto *A = get<Attr>()) + return A->getRange(); + if (const ObjCProtocolLoc *P = get<ObjCProtocolLoc>()) + return P->getSourceRange(); + if (const ConceptReference *C = get<ConceptReference>()) + return C->getSourceRange(); return SourceRange(); } diff --git a/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp b/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp new file mode 100644 index 000000000000..56a143b9ed29 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp @@ -0,0 +1,27 @@ +//===--- AttrDocTable.cpp - implements Attr::getDocumentation() -*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License 
v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains out-of-line methods for Attr classes. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Attr.h" +#include "llvm/ADT/StringRef.h" + +#include "AttrDocTable.inc" + +static const llvm::StringRef AttrDoc[] = { +#define ATTR(NAME) AttrDoc_##NAME, +#include "clang/Basic/AttrList.inc" +}; + +llvm::StringRef clang::Attr::getDocumentation(clang::attr::Kind K) { + if (K < (int)std::size(AttrDoc)) + return AttrDoc[K]; + return ""; +} diff --git a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp index 662f86722fa3..f198a9acf848 100644 --- a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp +++ b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp @@ -14,6 +14,7 @@ #include "clang/AST/Attr.h" #include "clang/AST/Expr.h" #include "clang/AST/Type.h" +#include <optional> using namespace clang; void LoopHintAttr::printPrettyPragma(raw_ostream &OS, @@ -60,7 +61,7 @@ std::string LoopHintAttr::getValueString(const PrintingPolicy &Policy) const { else OS << "disable"; OS << ")"; - return OS.str(); + return ValueName; } // Return a string suitable for identifying this attribute in diagnostics. @@ -137,49 +138,58 @@ void OMPDeclareTargetDeclAttr::printPrettyPragma( // Use fake syntax because it is for testing and debugging purpose only. if (getDevType() != DT_Any) OS << " device_type(" << ConvertDevTypeTyToStr(getDevType()) << ")"; - if (getMapType() != MT_To) + if (getMapType() != MT_To && getMapType() != MT_Enter) OS << ' ' << ConvertMapTypeTyToStr(getMapType()); + if (Expr *E = getIndirectExpr()) { + OS << " indirect("; + E->printPretty(OS, nullptr, Policy); + OS << ")"; + } else if (getIndirect()) { + OS << " indirect"; + } } -llvm::Optional<OMPDeclareTargetDeclAttr *> +std::optional<OMPDeclareTargetDeclAttr *> OMPDeclareTargetDeclAttr::getActiveAttr(const ValueDecl *VD) { - if (!VD->hasAttrs()) - return llvm::None; + if (llvm::all_of(VD->redecls(), [](const Decl *D) { return !D->hasAttrs(); })) + return std::nullopt; unsigned Level = 0; OMPDeclareTargetDeclAttr *FoundAttr = nullptr; - for (auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) { - if (Level <= Attr->getLevel()) { - Level = Attr->getLevel(); - FoundAttr = Attr; + for (const Decl *D : VD->redecls()) { + for (auto *Attr : D->specific_attrs<OMPDeclareTargetDeclAttr>()) { + if (Level <= Attr->getLevel()) { + Level = Attr->getLevel(); + FoundAttr = Attr; + } } } if (FoundAttr) return FoundAttr; - return llvm::None; + return std::nullopt; } -llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> +std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) { - llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD); - if (ActiveAttr.hasValue()) - return ActiveAttr.getValue()->getMapType(); - return llvm::None; + std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD); + if (ActiveAttr) + return (*ActiveAttr)->getMapType(); + return std::nullopt; } -llvm::Optional<OMPDeclareTargetDeclAttr::DevTypeTy> +std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) { - llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD); - 
if (ActiveAttr.hasValue()) - return ActiveAttr.getValue()->getDevType(); - return llvm::None; + std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD); + if (ActiveAttr) + return (*ActiveAttr)->getDevType(); + return std::nullopt; } -llvm::Optional<SourceLocation> +std::optional<SourceLocation> OMPDeclareTargetDeclAttr::getLocation(const ValueDecl *VD) { - llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD); - if (ActiveAttr.hasValue()) - return ActiveAttr.getValue()->getRange().getBegin(); - return llvm::None; + std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD); + if (ActiveAttr) + return (*ActiveAttr)->getRange().getBegin(); + return std::nullopt; } namespace clang { @@ -195,6 +205,69 @@ void OMPDeclareVariantAttr::printPrettyPragma( OS << ")"; } OS << " match(" << traitInfos << ")"; + + auto PrintExprs = [&OS, &Policy](Expr **Begin, Expr **End) { + for (Expr **I = Begin; I != End; ++I) { + assert(*I && "Expected non-null Stmt"); + if (I != Begin) + OS << ","; + (*I)->printPretty(OS, nullptr, Policy); + } + }; + if (adjustArgsNothing_size()) { + OS << " adjust_args(nothing:"; + PrintExprs(adjustArgsNothing_begin(), adjustArgsNothing_end()); + OS << ")"; + } + if (adjustArgsNeedDevicePtr_size()) { + OS << " adjust_args(need_device_ptr:"; + PrintExprs(adjustArgsNeedDevicePtr_begin(), adjustArgsNeedDevicePtr_end()); + OS << ")"; + } + + auto PrintInteropInfo = [&OS](OMPInteropInfo *Begin, OMPInteropInfo *End) { + for (OMPInteropInfo *I = Begin; I != End; ++I) { + if (I != Begin) + OS << ", "; + OS << "interop("; + OS << getInteropTypeString(I); + OS << ")"; + } + }; + if (appendArgs_size()) { + OS << " append_args("; + PrintInteropInfo(appendArgs_begin(), appendArgs_end()); + OS << ")"; + } +} + +unsigned AlignedAttr::getAlignment(ASTContext &Ctx) const { + assert(!isAlignmentDependent()); + if (getCachedAlignmentValue()) + return *getCachedAlignmentValue(); + + // Handle alignmentType case. + if (!isAlignmentExpr()) { + QualType T = getAlignmentType()->getType(); + + // C++ [expr.alignof]p3: + // When alignof is applied to a reference type, the result is the + // alignment of the referenced type. + T = T.getNonReferenceType(); + + if (T.getQualifiers().hasUnaligned()) + return Ctx.getCharWidth(); + + return Ctx.getTypeAlignInChars(T.getTypePtr()).getQuantity() * + Ctx.getCharWidth(); + } + + // Handle alignmentExpr case. + if (alignmentExpr) + return alignmentExpr->EvaluateKnownConstInt(Ctx).getZExtValue() * + Ctx.getCharWidth(); + + return Ctx.getTargetDefaultAlignForAttributeAligned(); } #include "clang/AST/AttrImpl.inc" diff --git a/contrib/llvm-project/clang/lib/AST/Availability.cpp b/contrib/llvm-project/clang/lib/AST/Availability.cpp new file mode 100644 index 000000000000..238359a2dedf --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Availability.cpp @@ -0,0 +1,48 @@ +//===- Availability.cpp --------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the Availability information for Decls. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Availability.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" +#include "clang/Basic/TargetInfo.h" + +namespace clang { + +AvailabilityInfo AvailabilityInfo::createFromDecl(const Decl *Decl) { + ASTContext &Context = Decl->getASTContext(); + StringRef PlatformName = Context.getTargetInfo().getPlatformName(); + AvailabilityInfo Availability; + + // Collect availability attributes from all redeclarations. + for (const auto *RD : Decl->redecls()) { + for (const auto *A : RD->specific_attrs<AvailabilityAttr>()) { + if (A->getPlatform()->getName() != PlatformName) + continue; + Availability = AvailabilityInfo( + A->getPlatform()->getName(), A->getIntroduced(), A->getDeprecated(), + A->getObsoleted(), A->getUnavailable(), false, false); + break; + } + + if (const auto *A = RD->getAttr<UnavailableAttr>()) + if (!A->isImplicit()) + Availability.UnconditionallyUnavailable = true; + + if (const auto *A = RD->getAttr<DeprecatedAttr>()) + if (!A->isImplicit()) + Availability.UnconditionallyDeprecated = true; + } + return Availability; +} + +} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/CXXABI.h b/contrib/llvm-project/clang/lib/AST/CXXABI.h index ca9424bcb7a4..9258a53fefeb 100644 --- a/contrib/llvm-project/clang/lib/AST/CXXABI.h +++ b/contrib/llvm-project/clang/lib/AST/CXXABI.h @@ -21,7 +21,6 @@ namespace clang { class ASTContext; class CXXConstructorDecl; class DeclaratorDecl; -class Expr; class MangleContext; class MangleNumberingContext; class MemberPointerType; diff --git a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp index 9027fa7a7515..25de2a20a7f3 100644 --- a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp +++ b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp @@ -22,7 +22,6 @@ #include "clang/Basic/LLVM.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" @@ -81,7 +80,8 @@ bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base, const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl(); return lookupInBases( [BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) { - return FindBaseClass(Specifier, Path, BaseDecl); + return Specifier->getType()->getAsRecordDecl() && + FindBaseClass(Specifier, Path, BaseDecl); }, Paths); } @@ -465,7 +465,7 @@ void OverridingMethods::add(unsigned OverriddenSubobject, UniqueVirtualMethod Overriding) { SmallVectorImpl<UniqueVirtualMethod> &SubobjectOverrides = Overrides[OverriddenSubobject]; - if (llvm::find(SubobjectOverrides, Overriding) == SubobjectOverrides.end()) + if (!llvm::is_contained(SubobjectOverrides, Overriding)) SubobjectOverrides.push_back(Overriding); } @@ -671,9 +671,7 @@ CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const { // FIXME: IsHidden reads from Overriding from the middle of a remove_if // over the same sequence! Is this guaranteed to work? 
- Overriding.erase( - std::remove_if(Overriding.begin(), Overriding.end(), IsHidden), - Overriding.end()); + llvm::erase_if(Overriding, IsHidden); } } } diff --git a/contrib/llvm-project/clang/lib/AST/Comment.cpp b/contrib/llvm-project/clang/lib/AST/Comment.cpp index a02cc9d119fe..cce8b12170f2 100644 --- a/contrib/llvm-project/clang/lib/AST/Comment.cpp +++ b/contrib/llvm-project/clang/lib/AST/Comment.cpp @@ -29,15 +29,16 @@ namespace comments { #undef ABSTRACT_COMMENT // DeclInfo is also allocated with a BumpPtrAllocator. -static_assert(std::is_trivially_destructible<DeclInfo>::value, +static_assert(std::is_trivially_destructible_v<DeclInfo>, "DeclInfo should be trivially destructible!"); const char *Comment::getCommentKindName() const { switch (getCommentKind()) { - case NoCommentKind: return "NoCommentKind"; + case CommentKind::None: + return "None"; #define ABSTRACT_COMMENT(COMMENT) -#define COMMENT(CLASS, PARENT) \ - case CLASS##Kind: \ +#define COMMENT(CLASS, PARENT) \ + case CommentKind::CLASS: \ return #CLASS; #include "clang/AST/CommentNodes.inc" #undef COMMENT @@ -81,10 +82,11 @@ static inline void CheckCommentASTNodes() { Comment::child_iterator Comment::child_begin() const { switch (getCommentKind()) { - case NoCommentKind: llvm_unreachable("comment without a kind"); + case CommentKind::None: + llvm_unreachable("comment without a kind"); #define ABSTRACT_COMMENT(COMMENT) -#define COMMENT(CLASS, PARENT) \ - case CLASS##Kind: \ +#define COMMENT(CLASS, PARENT) \ + case CommentKind::CLASS: \ return static_cast<const CLASS *>(this)->child_begin(); #include "clang/AST/CommentNodes.inc" #undef COMMENT @@ -95,10 +97,11 @@ Comment::child_iterator Comment::child_begin() const { Comment::child_iterator Comment::child_end() const { switch (getCommentKind()) { - case NoCommentKind: llvm_unreachable("comment without a kind"); + case CommentKind::None: + llvm_unreachable("comment without a kind"); #define ABSTRACT_COMMENT(COMMENT) -#define COMMENT(CLASS, PARENT) \ - case CLASS##Kind: \ +#define COMMENT(CLASS, PARENT) \ + case CommentKind::CLASS: \ return static_cast<const CLASS *>(this)->child_end(); #include "clang/AST/CommentNodes.inc" #undef COMMENT @@ -108,12 +111,7 @@ Comment::child_iterator Comment::child_end() const { } bool TextComment::isWhitespaceNoCache() const { - for (StringRef::const_iterator I = Text.begin(), E = Text.end(); - I != E; ++I) { - if (!clang::isWhitespace(*I)) - return false; - } - return true; + return llvm::all_of(Text, clang::isWhitespace); } bool ParagraphComment::isWhitespaceNoCache() const { @@ -189,13 +187,14 @@ static bool getFunctionTypeLoc(TypeLoc TL, FunctionTypeLoc &ResFTL) { return false; } -const char *ParamCommandComment::getDirectionAsString(PassDirection D) { +const char * +ParamCommandComment::getDirectionAsString(ParamCommandPassDirection D) { switch (D) { - case ParamCommandComment::In: + case ParamCommandPassDirection::In: return "[in]"; - case ParamCommandComment::Out: + case ParamCommandPassDirection::Out: return "[out]"; - case ParamCommandComment::InOut: + case ParamCommandPassDirection::InOut: return "[in,out]"; } llvm_unreachable("unknown PassDirection"); @@ -210,7 +209,8 @@ void DeclInfo::fill() { IsObjCMethod = false; IsInstanceMethod = false; IsClassMethod = false; - ParamVars = None; + IsVariadic = false; + ParamVars = std::nullopt; TemplateParameters = nullptr; if (!CommentDecl) { @@ -221,6 +221,7 @@ void DeclInfo::fill() { CurrentDecl = CommentDecl; Decl::Kind K = CommentDecl->getKind(); + const TypeSourceInfo *TSI = nullptr; switch 
(K) { default: // Defaults are should be good for declarations we don't handle explicitly. @@ -247,6 +248,8 @@ void DeclInfo::fill() { IsInstanceMethod = MD->isInstance(); IsClassMethod = !IsInstanceMethod; } + IsVariadic = FD->isVariadic(); + assert(involvesFunctionType()); break; } case Decl::ObjCMethod: { @@ -257,6 +260,8 @@ void DeclInfo::fill() { IsObjCMethod = true; IsInstanceMethod = MD->isInstanceMethod(); IsClassMethod = !IsInstanceMethod; + IsVariadic = MD->isVariadic(); + assert(involvesFunctionType()); break; } case Decl::FunctionTemplate: { @@ -267,6 +272,8 @@ void DeclInfo::fill() { ParamVars = FD->parameters(); ReturnType = FD->getReturnType(); TemplateParameters = FTD->getTemplateParameters(); + IsVariadic = FD->isVariadic(); + assert(involvesFunctionType()); break; } case Decl::ClassTemplate: { @@ -293,76 +300,66 @@ void DeclInfo::fill() { Kind = ClassKind; break; case Decl::Var: + if (const VarTemplateDecl *VTD = + cast<VarDecl>(CommentDecl)->getDescribedVarTemplate()) { + TemplateKind = TemplateSpecialization; + TemplateParameters = VTD->getTemplateParameters(); + } + [[fallthrough]]; case Decl::Field: case Decl::EnumConstant: case Decl::ObjCIvar: case Decl::ObjCAtDefsField: - case Decl::ObjCProperty: { - const TypeSourceInfo *TSI; + case Decl::ObjCProperty: if (const auto *VD = dyn_cast<DeclaratorDecl>(CommentDecl)) TSI = VD->getTypeSourceInfo(); else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(CommentDecl)) TSI = PD->getTypeSourceInfo(); - else - TSI = nullptr; - if (TSI) { - TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc(); - FunctionTypeLoc FTL; - if (getFunctionTypeLoc(TL, FTL)) { - ParamVars = FTL.getParams(); - ReturnType = FTL.getReturnLoc().getType(); - } - } Kind = VariableKind; break; + case Decl::VarTemplate: { + const VarTemplateDecl *VTD = cast<VarTemplateDecl>(CommentDecl); + Kind = VariableKind; + TemplateKind = Template; + TemplateParameters = VTD->getTemplateParameters(); + if (const VarDecl *VD = VTD->getTemplatedDecl()) + TSI = VD->getTypeSourceInfo(); + break; } case Decl::Namespace: Kind = NamespaceKind; break; case Decl::TypeAlias: - case Decl::Typedef: { + case Decl::Typedef: Kind = TypedefKind; - // If this is a typedef / using to something we consider a function, extract - // arguments and return type. - const TypeSourceInfo *TSI = - K == Decl::Typedef - ? cast<TypedefDecl>(CommentDecl)->getTypeSourceInfo() - : cast<TypeAliasDecl>(CommentDecl)->getTypeSourceInfo(); - if (!TSI) - break; - TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc(); - FunctionTypeLoc FTL; - if (getFunctionTypeLoc(TL, FTL)) { - Kind = FunctionKind; - ParamVars = FTL.getParams(); - ReturnType = FTL.getReturnLoc().getType(); - } + TSI = cast<TypedefNameDecl>(CommentDecl)->getTypeSourceInfo(); break; - } case Decl::TypeAliasTemplate: { const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(CommentDecl); Kind = TypedefKind; TemplateKind = Template; TemplateParameters = TAT->getTemplateParameters(); - TypeAliasDecl *TAD = TAT->getTemplatedDecl(); - if (!TAD) - break; + if (TypeAliasDecl *TAD = TAT->getTemplatedDecl()) + TSI = TAD->getTypeSourceInfo(); + break; + } + case Decl::Enum: + Kind = EnumKind; + break; + } - const TypeSourceInfo *TSI = TAD->getTypeSourceInfo(); - if (!TSI) - break; + // If the type is a typedef / using to something we consider a function, + // extract arguments and return type. 
+ if (TSI) { TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc(); FunctionTypeLoc FTL; if (getFunctionTypeLoc(TL, FTL)) { - Kind = FunctionKind; ParamVars = FTL.getParams(); ReturnType = FTL.getReturnLoc().getType(); + if (const auto *FPT = dyn_cast<FunctionProtoType>(FTL.getTypePtr())) + IsVariadic = FPT->isVariadic(); + assert(involvesFunctionType()); } - break; - } - case Decl::Enum: - Kind = EnumKind; - break; } IsFilled = true; diff --git a/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp b/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp index 2b648cbb1d4b..bf9e17993497 100644 --- a/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp +++ b/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp @@ -8,15 +8,12 @@ #include "clang/AST/CommentBriefParser.h" #include "clang/AST/CommentCommandTraits.h" +#include "clang/Basic/CharInfo.h" namespace clang { namespace comments { namespace { -inline bool isWhitespace(char C) { - return C == ' ' || C == '\n' || C == '\r' || - C == '\t' || C == '\f' || C == '\v'; -} /// Convert all whitespace into spaces, remove leading and trailing spaces, /// compress multiple spaces into one. @@ -26,12 +23,11 @@ void cleanupBrief(std::string &S) { for (std::string::iterator I = S.begin(), E = S.end(); I != E; ++I) { const char C = *I; - if (isWhitespace(C)) { + if (clang::isWhitespace(C)) { if (!PrevWasSpace) { *O++ = ' '; PrevWasSpace = true; } - continue; } else { *O++ = C; PrevWasSpace = false; @@ -44,12 +40,7 @@ void cleanupBrief(std::string &S) { } bool isWhitespace(StringRef Text) { - for (StringRef::const_iterator I = Text.begin(), E = Text.end(); - I != E; ++I) { - if (!isWhitespace(*I)) - return false; - } - return true; + return llvm::all_of(Text, clang::isWhitespace); } } // unnamed namespace @@ -123,7 +114,7 @@ std::string BriefParser::Parse() { // We found a paragraph end. This ends the brief description if // \command or its equivalent was explicitly used. // Stop scanning text because an explicit \paragraph is the - // preffered one. + // preferred one. if (InBrief) break; // End first paragraph if we found some non-whitespace text. 
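The CommentBriefParser.cpp hunk above swaps the file-local isWhitespace helper for clang::isWhitespace but keeps cleanupBrief's behaviour: runs of whitespace collapse to a single space and leading/trailing spaces are trimmed. The sketch below is a hypothetical, standalone illustration of that same normalization (cleanupBriefSketch and main are invented names here, and it uses only the C++ standard library instead of the clang CharInfo helpers):

#include <cctype>
#include <iostream>
#include <string>

// Collapse every run of whitespace to one space and trim both ends,
// mirroring what cleanupBrief() does to a brief-comment string.
static std::string cleanupBriefSketch(const std::string &In) {
  std::string Out;
  bool PrevWasSpace = true; // treat the start as whitespace so leading spaces are dropped
  for (char C : In) {
    if (std::isspace(static_cast<unsigned char>(C))) {
      if (!PrevWasSpace) {
        Out.push_back(' ');
        PrevWasSpace = true;
      }
    } else {
      Out.push_back(C);
      PrevWasSpace = false;
    }
  }
  if (!Out.empty() && Out.back() == ' ')
    Out.pop_back(); // drop the single trailing space, as cleanupBrief does
  return Out;
}

int main() {
  // Prints: A brief description.
  std::cout << cleanupBriefSketch("  A \t brief\r\n description. ") << '\n';
}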
diff --git a/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp b/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp index bdc0dd47fb7d..a37a0e18432c 100644 --- a/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp +++ b/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp @@ -16,8 +16,8 @@ namespace comments { #include "clang/AST/CommentCommandInfo.inc" CommandTraits::CommandTraits(llvm::BumpPtrAllocator &Allocator, - const CommentOptions &CommentOptions) : - NextID(llvm::array_lengthof(Commands)), Allocator(Allocator) { + const CommentOptions &CommentOptions) + : NextID(std::size(Commands)), Allocator(Allocator) { registerCommentOptions(CommentOptions); } @@ -115,7 +115,7 @@ const CommandInfo *CommandTraits::registerBlockCommand(StringRef CommandName) { const CommandInfo *CommandTraits::getBuiltinCommandInfo( unsigned CommandID) { - if (CommandID < llvm::array_lengthof(Commands)) + if (CommandID < std::size(Commands)) return &Commands[CommandID]; return nullptr; } @@ -131,7 +131,7 @@ const CommandInfo *CommandTraits::getRegisteredCommandInfo( const CommandInfo *CommandTraits::getRegisteredCommandInfo( unsigned CommandID) const { - return RegisteredCommands[CommandID - llvm::array_lengthof(Commands)]; + return RegisteredCommands[CommandID - std::size(Commands)]; } } // end namespace comments diff --git a/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp b/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp index 4bebd41e15ee..f0250fc9fd55 100644 --- a/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp +++ b/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp @@ -94,31 +94,12 @@ void Lexer::skipLineStartingDecorations() { if (BufferPtr == CommentEnd) return; - switch (*BufferPtr) { - case ' ': - case '\t': - case '\f': - case '\v': { - const char *NewBufferPtr = BufferPtr; - NewBufferPtr++; - if (NewBufferPtr == CommentEnd) + const char *NewBufferPtr = BufferPtr; + while (isHorizontalWhitespace(*NewBufferPtr)) + if (++NewBufferPtr == CommentEnd) return; - - char C = *NewBufferPtr; - while (isHorizontalWhitespace(C)) { - NewBufferPtr++; - if (NewBufferPtr == CommentEnd) - return; - C = *NewBufferPtr; - } - if (C == '*') - BufferPtr = NewBufferPtr + 1; - break; - } - case '*': - BufferPtr++; - break; - } + if (*NewBufferPtr == '*') + BufferPtr = NewBufferPtr + 1; } namespace { @@ -289,6 +270,29 @@ void Lexer::formTokenWithChars(Token &Result, const char *TokEnd, BufferPtr = TokEnd; } +const char *Lexer::skipTextToken() { + const char *TokenPtr = BufferPtr; + assert(TokenPtr < CommentEnd); + StringRef TokStartSymbols = ParseCommands ? "\n\r\\@\"&<" : "\n\r"; + +again: + size_t End = + StringRef(TokenPtr, CommentEnd - TokenPtr).find_first_of(TokStartSymbols); + if (End == StringRef::npos) + return CommentEnd; + + // Doxygen doesn't recognize any commands in a one-line double quotation. + // If we don't find an ending quotation mark, we pretend it never began. + if (*(TokenPtr + End) == '\"') { + TokenPtr += End + 1; + End = StringRef(TokenPtr, CommentEnd - TokenPtr).find_first_of("\n\r\""); + if (End != StringRef::npos && *(TokenPtr + End) == '\"') + TokenPtr += End + 1; + goto again; + } + return TokenPtr + End; +} + void Lexer::lexCommentText(Token &T) { assert(CommentState == LCS_InsideBCPLComment || CommentState == LCS_InsideCComment); @@ -309,17 +313,8 @@ void Lexer::lexCommentText(Token &T) { skipLineStartingDecorations(); return; - default: { - StringRef TokStartSymbols = ParseCommands ? 
"\n\r\\@&<" : "\n\r"; - size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr) - .find_first_of(TokStartSymbols); - if (End != StringRef::npos) - TokenPtr += End; - else - TokenPtr = CommentEnd; - formTextToken(T, TokenPtr); - return; - } + default: + return formTextToken(T, skipTextToken()); } }; @@ -392,10 +387,11 @@ void Lexer::lexCommentText(Token &T) { unsigned Length = TokenPtr - (BufferPtr + 1); // Hardcoded support for lexing LaTeX formula commands - // \f$ \f[ \f] \f{ \f} as a single command. + // \f$ \f( \f) \f[ \f] \f{ \f} as a single command. if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) { C = *TokenPtr; - if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') { + if (C == '$' || C == '(' || C == ')' || C == '[' || C == ']' || + C == '{' || C == '}') { TokenPtr++; Length++; } @@ -705,7 +701,7 @@ void Lexer::lexHTMLStartTag(Token &T) { C = *BufferPtr; if (!isHTMLIdentifierStartingCharacter(C) && - C != '=' && C != '\"' && C != '\'' && C != '>') { + C != '=' && C != '\"' && C != '\'' && C != '>' && C != '/') { State = LS_Normal; return; } diff --git a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp index 29983b0a16c3..d5e5bb27ceba 100644 --- a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp +++ b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp @@ -89,6 +89,31 @@ class TextTokenRetokenizer { } } + /// Extract a template type + bool lexTemplate(SmallString<32> &WordText) { + unsigned BracketCount = 0; + while (!isEnd()) { + const char C = peek(); + WordText.push_back(C); + consumeChar(); + switch (C) { + case '<': { + BracketCount++; + break; + } + case '>': { + BracketCount--; + if (!BracketCount) + return true; + break; + } + default: + break; + } + } + return false; + } + /// Add a token. /// Returns true on success, false if there are no interesting tokens to /// fetch from lexer. @@ -149,6 +174,111 @@ public: addToken(); } + /// Extract a type argument + bool lexType(Token &Tok) { + if (isEnd()) + return false; + + // Save current position in case we need to rollback because the type is + // empty. + Position SavedPos = Pos; + + // Consume any leading whitespace. + consumeWhitespace(); + SmallString<32> WordText; + const char *WordBegin = Pos.BufferPtr; + SourceLocation Loc = getSourceLocation(); + + while (!isEnd()) { + const char C = peek(); + // For non-whitespace characters we check if it's a template or otherwise + // continue reading the text into a word. + if (!isWhitespace(C)) { + if (C == '<') { + if (!lexTemplate(WordText)) + return false; + } else { + WordText.push_back(C); + consumeChar(); + } + } else { + consumeChar(); + break; + } + } + + const unsigned Length = WordText.size(); + if (Length == 0) { + Pos = SavedPos; + return false; + } + + char *TextPtr = Allocator.Allocate<char>(Length + 1); + + memcpy(TextPtr, WordText.c_str(), Length + 1); + StringRef Text = StringRef(TextPtr, Length); + + formTokenWithChars(Tok, Loc, WordBegin, Length, Text); + return true; + } + + // Check if this line starts with @par or \par + bool startsWithParCommand() { + unsigned Offset = 1; + + // Skip all whitespace characters at the beginning. + // This needs to backtrack because Pos has already advanced past the + // actual \par or @par command by the time this function is called. + while (isWhitespace(*(Pos.BufferPtr - Offset))) + Offset++; + + // Once we've reached the whitespace, backtrack and check if the previous + // four characters are \par or @par. 
+ llvm::StringRef LineStart(Pos.BufferPtr - Offset - 3, 4); + return LineStart.starts_with("\\par") || LineStart.starts_with("@par"); + } + + /// Extract a par command argument-header. + bool lexParHeading(Token &Tok) { + if (isEnd()) + return false; + + Position SavedPos = Pos; + + consumeWhitespace(); + SmallString<32> WordText; + const char *WordBegin = Pos.BufferPtr; + SourceLocation Loc = getSourceLocation(); + + if (!startsWithParCommand()) + return false; + + // Read until the end of this token, which is effectively the end of the + // line. This gets us the content of the par header, if there is one. + while (!isEnd()) { + WordText.push_back(peek()); + if (Pos.BufferPtr + 1 == Pos.BufferEnd) { + consumeChar(); + break; + } + consumeChar(); + } + + unsigned Length = WordText.size(); + if (Length == 0) { + Pos = SavedPos; + return false; + } + + char *TextPtr = Allocator.Allocate<char>(Length + 1); + + memcpy(TextPtr, WordText.c_str(), Length + 1); + StringRef Text = StringRef(TextPtr, Length); + + formTokenWithChars(Tok, Loc, WordBegin, Length, Text); + return true; + } + /// Extract a word -- sequence of non-whitespace characters. bool lexWord(Token &Tok) { if (isEnd()) @@ -245,7 +375,7 @@ public: Pos.CurToken++; } - P.putBack(llvm::makeArrayRef(Toks.begin() + Pos.CurToken, Toks.end())); + P.putBack(llvm::ArrayRef(Toks.begin() + Pos.CurToken, Toks.end())); Pos.CurToken = Toks.size(); if (HavePartialTok) @@ -289,22 +419,54 @@ void Parser::parseTParamCommandArgs(TParamCommandComment *TPC, Arg.getText()); } -void Parser::parseBlockCommandArgs(BlockCommandComment *BC, - TextTokenRetokenizer &Retokenizer, - unsigned NumArgs) { - typedef BlockCommandComment::Argument Argument; - Argument *Args = - new (Allocator.Allocate<Argument>(NumArgs)) Argument[NumArgs]; +ArrayRef<Comment::Argument> +Parser::parseCommandArgs(TextTokenRetokenizer &Retokenizer, unsigned NumArgs) { + auto *Args = new (Allocator.Allocate<Comment::Argument>(NumArgs)) + Comment::Argument[NumArgs]; unsigned ParsedArgs = 0; Token Arg; while (ParsedArgs < NumArgs && Retokenizer.lexWord(Arg)) { - Args[ParsedArgs] = Argument(SourceRange(Arg.getLocation(), - Arg.getEndLocation()), - Arg.getText()); + Args[ParsedArgs] = Comment::Argument{ + SourceRange(Arg.getLocation(), Arg.getEndLocation()), Arg.getText()}; ParsedArgs++; } - S.actOnBlockCommandArgs(BC, llvm::makeArrayRef(Args, ParsedArgs)); + return llvm::ArrayRef(Args, ParsedArgs); +} + +ArrayRef<Comment::Argument> +Parser::parseThrowCommandArgs(TextTokenRetokenizer &Retokenizer, + unsigned NumArgs) { + auto *Args = new (Allocator.Allocate<Comment::Argument>(NumArgs)) + Comment::Argument[NumArgs]; + unsigned ParsedArgs = 0; + Token Arg; + + while (ParsedArgs < NumArgs && Retokenizer.lexType(Arg)) { + Args[ParsedArgs] = Comment::Argument{ + SourceRange(Arg.getLocation(), Arg.getEndLocation()), Arg.getText()}; + ParsedArgs++; + } + + return llvm::ArrayRef(Args, ParsedArgs); +} + +ArrayRef<Comment::Argument> +Parser::parseParCommandArgs(TextTokenRetokenizer &Retokenizer, + unsigned NumArgs) { + assert(NumArgs > 0); + auto *Args = new (Allocator.Allocate<Comment::Argument>(NumArgs)) + Comment::Argument[NumArgs]; + unsigned ParsedArgs = 0; + Token Arg; + + while (ParsedArgs < NumArgs && Retokenizer.lexParHeading(Arg)) { + Args[ParsedArgs] = Comment::Argument{ + SourceRange(Arg.getLocation(), Arg.getEndLocation()), Arg.getText()}; + ParsedArgs++; + } + + return llvm::ArrayRef(Args, ParsedArgs); } BlockCommandComment *Parser::parseBlockCommand() { @@ -337,7 +499,7 @@ 
BlockCommandComment *Parser::parseBlockCommand() { if (isTokBlockCommand()) { // Block command ahead. We can't nest block commands, so pretend that this // command has an empty argument. - ParagraphComment *Paragraph = S.actOnParagraphComment(None); + ParagraphComment *Paragraph = S.actOnParagraphComment(std::nullopt); if (PC) { S.actOnParamCommandFinish(PC, Paragraph); return PC; @@ -359,8 +521,14 @@ BlockCommandComment *Parser::parseBlockCommand() { parseParamCommandArgs(PC, Retokenizer); else if (TPC) parseTParamCommandArgs(TPC, Retokenizer); + else if (Info->IsThrowsCommand) + S.actOnBlockCommandArgs( + BC, parseThrowCommandArgs(Retokenizer, Info->NumArgs)); + else if (Info->IsParCommand) + S.actOnBlockCommandArgs(BC, + parseParCommandArgs(Retokenizer, Info->NumArgs)); else - parseBlockCommandArgs(BC, Retokenizer, Info->NumArgs); + S.actOnBlockCommandArgs(BC, parseCommandArgs(Retokenizer, Info->NumArgs)); Retokenizer.putBackLeftoverTokens(); } @@ -379,7 +547,7 @@ BlockCommandComment *Parser::parseBlockCommand() { ParagraphComment *Paragraph; if (EmptyParagraph) - Paragraph = S.actOnParagraphComment(None); + Paragraph = S.actOnParagraphComment(std::nullopt); else { BlockContentComment *Block = parseParagraphOrBlockCommand(); // Since we have checked for a block command, we should have parsed a @@ -401,32 +569,24 @@ BlockCommandComment *Parser::parseBlockCommand() { InlineCommandComment *Parser::parseInlineCommand() { assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command)); + const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID()); const Token CommandTok = Tok; consumeToken(); TextTokenRetokenizer Retokenizer(Allocator, *this); + ArrayRef<Comment::Argument> Args = + parseCommandArgs(Retokenizer, Info->NumArgs); - Token ArgTok; - bool ArgTokValid = Retokenizer.lexWord(ArgTok); - - InlineCommandComment *IC; - if (ArgTokValid) { - IC = S.actOnInlineCommand(CommandTok.getLocation(), - CommandTok.getEndLocation(), - CommandTok.getCommandID(), - ArgTok.getLocation(), - ArgTok.getEndLocation(), - ArgTok.getText()); - } else { - IC = S.actOnInlineCommand(CommandTok.getLocation(), - CommandTok.getEndLocation(), - CommandTok.getCommandID()); + InlineCommandComment *IC = S.actOnInlineCommand( + CommandTok.getLocation(), CommandTok.getEndLocation(), + CommandTok.getCommandID(), Args); + if (Args.size() < Info->NumArgs) { Diag(CommandTok.getEndLocation().getLocWithOffset(1), - diag::warn_doc_inline_contents_no_argument) - << CommandTok.is(tok::at_command) - << Traits.getCommandInfo(CommandTok.getCommandID())->Name + diag::warn_doc_inline_command_not_enough_arguments) + << CommandTok.is(tok::at_command) << Info->Name << Args.size() + << Info->NumArgs << SourceRange(CommandTok.getLocation(), CommandTok.getEndLocation()); } @@ -478,16 +638,14 @@ HTMLStartTagComment *Parser::parseHTMLStartTag() { } case tok::html_greater: - S.actOnHTMLStartTagFinish(HST, - S.copyArray(llvm::makeArrayRef(Attrs)), + S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)), Tok.getLocation(), /* IsSelfClosing = */ false); consumeToken(); return HST; case tok::html_slash_greater: - S.actOnHTMLStartTagFinish(HST, - S.copyArray(llvm::makeArrayRef(Attrs)), + S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)), Tok.getLocation(), /* IsSelfClosing = */ true); consumeToken(); @@ -505,16 +663,14 @@ HTMLStartTagComment *Parser::parseHTMLStartTag() { Tok.is(tok::html_slash_greater)) continue; - S.actOnHTMLStartTagFinish(HST, - S.copyArray(llvm::makeArrayRef(Attrs)), + 
S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)), SourceLocation(), /* IsSelfClosing = */ false); return HST; default: // Not a token from an HTML start tag. Thus HTML tag prematurely ended. - S.actOnHTMLStartTagFinish(HST, - S.copyArray(llvm::makeArrayRef(Attrs)), + S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)), SourceLocation(), /* IsSelfClosing = */ false); bool StartLineInvalid; @@ -653,7 +809,7 @@ BlockContentComment *Parser::parseParagraphOrBlockCommand() { break; } - return S.actOnParagraphComment(S.copyArray(llvm::makeArrayRef(Content))); + return S.actOnParagraphComment(S.copyArray(llvm::ArrayRef(Content))); } VerbatimBlockComment *Parser::parseVerbatimBlock() { @@ -690,14 +846,13 @@ VerbatimBlockComment *Parser::parseVerbatimBlock() { if (Tok.is(tok::verbatim_block_end)) { const CommandInfo *Info = Traits.getCommandInfo(Tok.getVerbatimBlockID()); - S.actOnVerbatimBlockFinish(VB, Tok.getLocation(), - Info->Name, - S.copyArray(llvm::makeArrayRef(Lines))); + S.actOnVerbatimBlockFinish(VB, Tok.getLocation(), Info->Name, + S.copyArray(llvm::ArrayRef(Lines))); consumeToken(); } else { // Unterminated \\verbatim block S.actOnVerbatimBlockFinish(VB, SourceLocation(), "", - S.copyArray(llvm::makeArrayRef(Lines))); + S.copyArray(llvm::ArrayRef(Lines))); } return VB; @@ -773,7 +928,7 @@ FullComment *Parser::parseFullComment() { while (Tok.is(tok::newline)) consumeToken(); } - return S.actOnFullComment(S.copyArray(llvm::makeArrayRef(Blocks))); + return S.actOnFullComment(S.copyArray(llvm::ArrayRef(Blocks))); } } // end namespace comments diff --git a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp index 7642e73fa171..bc01baa1d917 100644 --- a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp +++ b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp @@ -86,7 +86,7 @@ ParamCommandComment *Sema::actOnParamCommandStart( new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID, CommandMarker); - if (!isFunctionDecl() && !isFunctionOrBlockPointerVarLikeDecl()) + if (!involvesFunctionType()) Diag(Command->getLocation(), diag::warn_doc_param_not_attached_to_a_function_decl) << CommandMarker @@ -219,12 +219,12 @@ void Sema::checkContainerDecl(const BlockCommandComment *Comment) { /// Turn a string into the corresponding PassDirection or -1 if it's not /// valid. -static int getParamPassDirection(StringRef Arg) { - return llvm::StringSwitch<int>(Arg) - .Case("[in]", ParamCommandComment::In) - .Case("[out]", ParamCommandComment::Out) - .Cases("[in,out]", "[out,in]", ParamCommandComment::InOut) - .Default(-1); +static ParamCommandPassDirection getParamPassDirection(StringRef Arg) { + return llvm::StringSwitch<ParamCommandPassDirection>(Arg) + .Case("[in]", ParamCommandPassDirection::In) + .Case("[out]", ParamCommandPassDirection::Out) + .Cases("[in,out]", "[out,in]", ParamCommandPassDirection::InOut) + .Default(static_cast<ParamCommandPassDirection>(-1)); } void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command, @@ -232,27 +232,25 @@ void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command, SourceLocation ArgLocEnd, StringRef Arg) { std::string ArgLower = Arg.lower(); - int Direction = getParamPassDirection(ArgLower); + ParamCommandPassDirection Direction = getParamPassDirection(ArgLower); - if (Direction == -1) { + if (Direction == static_cast<ParamCommandPassDirection>(-1)) { // Try again with whitespace removed. 
- ArgLower.erase( - std::remove_if(ArgLower.begin(), ArgLower.end(), clang::isWhitespace), - ArgLower.end()); + llvm::erase_if(ArgLower, clang::isWhitespace); Direction = getParamPassDirection(ArgLower); SourceRange ArgRange(ArgLocBegin, ArgLocEnd); - if (Direction != -1) { - const char *FixedName = ParamCommandComment::getDirectionAsString( - (ParamCommandComment::PassDirection)Direction); + if (Direction != static_cast<ParamCommandPassDirection>(-1)) { + const char *FixedName = + ParamCommandComment::getDirectionAsString(Direction); Diag(ArgLocBegin, diag::warn_doc_param_spaces_in_direction) << ArgRange << FixItHint::CreateReplacement(ArgRange, FixedName); } else { Diag(ArgLocBegin, diag::warn_doc_param_invalid_direction) << ArgRange; - Direction = ParamCommandComment::In; // Sane fall back. + Direction = ParamCommandPassDirection::In; // Sane fall back. } } - Command->setDirection((ParamCommandComment::PassDirection)Direction, + Command->setDirection(Direction, /*Explicit=*/true); } @@ -265,13 +263,12 @@ void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command, if (!Command->isDirectionExplicit()) { // User didn't provide a direction argument. - Command->setDirection(ParamCommandComment::In, /* Explicit = */ false); + Command->setDirection(ParamCommandPassDirection::In, + /* Explicit = */ false); } - typedef BlockCommandComment::Argument Argument; - Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin, - ArgLocEnd), - Arg); - Command->setArgs(llvm::makeArrayRef(A, 1)); + auto *A = new (Allocator) + Comment::Argument{SourceRange(ArgLocBegin, ArgLocEnd), Arg}; + Command->setArgs(llvm::ArrayRef(A, 1)); } void Sema::actOnParamCommandFinish(ParamCommandComment *Command, @@ -305,11 +302,9 @@ void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command, // Parser will not feed us more arguments than needed. assert(Command->getNumArgs() == 0); - typedef BlockCommandComment::Argument Argument; - Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin, - ArgLocEnd), - Arg); - Command->setArgs(llvm::makeArrayRef(A, 1)); + auto *A = new (Allocator) + Comment::Argument{SourceRange(ArgLocBegin, ArgLocEnd), Arg}; + Command->setArgs(llvm::ArrayRef(A, 1)); if (!isTemplateOrSpecialization()) { // We already warned that this \\tparam is not attached to a template decl. 
@@ -320,7 +315,7 @@ void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command, ThisDeclInfo->TemplateParameters; SmallVector<unsigned, 2> Position; if (resolveTParamReference(Arg, TemplateParameters, &Position)) { - Command->setPosition(copyArray(llvm::makeArrayRef(Position))); + Command->setPosition(copyArray(llvm::ArrayRef(Position))); TParamCommandComment *&PrevCommand = TemplateParameterDocs[Arg]; if (PrevCommand) { SourceRange ArgRange(ArgLocBegin, ArgLocEnd); @@ -363,37 +358,15 @@ void Sema::actOnTParamCommandFinish(TParamCommandComment *Command, checkBlockCommandEmptyParagraph(Command); } -InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin, - SourceLocation CommandLocEnd, - unsigned CommandID) { - ArrayRef<InlineCommandComment::Argument> Args; +InlineCommandComment * +Sema::actOnInlineCommand(SourceLocation CommandLocBegin, + SourceLocation CommandLocEnd, unsigned CommandID, + ArrayRef<Comment::Argument> Args) { StringRef CommandName = Traits.getCommandInfo(CommandID)->Name; - return new (Allocator) InlineCommandComment( - CommandLocBegin, - CommandLocEnd, - CommandID, - getInlineCommandRenderKind(CommandName), - Args); -} -InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin, - SourceLocation CommandLocEnd, - unsigned CommandID, - SourceLocation ArgLocBegin, - SourceLocation ArgLocEnd, - StringRef Arg) { - typedef InlineCommandComment::Argument Argument; - Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin, - ArgLocEnd), - Arg); - StringRef CommandName = Traits.getCommandInfo(CommandID)->Name; - - return new (Allocator) InlineCommandComment( - CommandLocBegin, - CommandLocEnd, - CommandID, - getInlineCommandRenderKind(CommandName), - llvm::makeArrayRef(A, 1)); + return new (Allocator) + InlineCommandComment(CommandLocBegin, CommandLocEnd, CommandID, + getInlineCommandRenderKind(CommandName), Args); } InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin, @@ -408,9 +381,7 @@ InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin, unsigned CommandID) { ArrayRef<InlineCommandComment::Argument> Args; return new (Allocator) InlineCommandComment( - LocBegin, LocEnd, CommandID, - InlineCommandComment::RenderNormal, - Args); + LocBegin, LocEnd, CommandID, InlineCommandRenderKind::Normal, Args); } TextComment *Sema::actOnText(SourceLocation LocBegin, @@ -590,7 +561,7 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) { // to document the value that the property getter returns. if (isObjCPropertyDecl()) return; - if (isFunctionDecl() || isFunctionOrBlockPointerVarLikeDecl()) { + if (involvesFunctionType()) { assert(!ThisDeclInfo->ReturnType.isNull() && "should have a valid return type"); if (ThisDeclInfo->ReturnType->isVoidType()) { @@ -692,12 +663,12 @@ void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) { return; const LangOptions &LO = FD->getLangOpts(); - const bool DoubleSquareBracket = LO.CPlusPlus14 || LO.C2x; + const bool DoubleSquareBracket = LO.CPlusPlus14 || LO.C23; StringRef AttributeSpelling = DoubleSquareBracket ? "[[deprecated]]" : "__attribute__((deprecated))"; if (PP) { // Try to find a replacement macro: - // - In C2x/C++14 we prefer [[deprecated]]. + // - In C23/C++14 we prefer [[deprecated]]. // - If not found or an older C/C++ look for __attribute__((deprecated)). 
StringRef MacroName; if (DoubleSquareBracket) { @@ -730,7 +701,7 @@ void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) { } void Sema::resolveParamCommandIndexes(const FullComment *FC) { - if (!isFunctionDecl()) { + if (!involvesFunctionType()) { // We already warned that \\param commands are not attached to a function // decl. return; @@ -818,6 +789,14 @@ void Sema::resolveParamCommandIndexes(const FullComment *FC) { } } +bool Sema::involvesFunctionType() { + if (!ThisDeclInfo) + return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->involvesFunctionType(); +} + bool Sema::isFunctionDecl() { if (!ThisDeclInfo) return false; @@ -832,26 +811,11 @@ bool Sema::isAnyFunctionDecl() { } bool Sema::isFunctionOrMethodVariadic() { - if (!isFunctionDecl() || !ThisDeclInfo->CurrentDecl) + if (!ThisDeclInfo) return false; - if (const FunctionDecl *FD = - dyn_cast<FunctionDecl>(ThisDeclInfo->CurrentDecl)) - return FD->isVariadic(); - if (const FunctionTemplateDecl *FTD = - dyn_cast<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl)) - return FTD->getTemplatedDecl()->isVariadic(); - if (const ObjCMethodDecl *MD = - dyn_cast<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl)) - return MD->isVariadic(); - if (const TypedefNameDecl *TD = - dyn_cast<TypedefNameDecl>(ThisDeclInfo->CurrentDecl)) { - QualType Type = TD->getUnderlyingType(); - if (Type->isFunctionPointerType() || Type->isBlockPointerType()) - Type = Type->getPointeeType(); - if (const auto *FT = Type->getAs<FunctionProtoType>()) - return FT->isVariadic(); - } - return false; + if (!ThisDeclInfo->IsFilled) + inspectThisDecl(); + return ThisDeclInfo->IsVariadic; } bool Sema::isObjCMethodDecl() { @@ -873,36 +837,6 @@ bool Sema::isFunctionPointerVarDecl() { return false; } -bool Sema::isFunctionOrBlockPointerVarLikeDecl() { - if (!ThisDeclInfo) - return false; - if (!ThisDeclInfo->IsFilled) - inspectThisDecl(); - if (ThisDeclInfo->getKind() != DeclInfo::VariableKind || - !ThisDeclInfo->CurrentDecl) - return false; - QualType QT; - if (const auto *VD = dyn_cast<DeclaratorDecl>(ThisDeclInfo->CurrentDecl)) - QT = VD->getType(); - else if (const auto *PD = - dyn_cast<ObjCPropertyDecl>(ThisDeclInfo->CurrentDecl)) - QT = PD->getType(); - else - return false; - // We would like to warn about the 'returns'/'param' commands for - // variables that don't directly specify the function type, so type aliases - // can be ignored. 
- if (QT->getAs<TypedefType>()) - return false; - if (const auto *P = QT->getAs<PointerType>()) - if (P->getPointeeType()->getAs<TypedefType>()) - return false; - if (const auto *P = QT->getAs<BlockPointerType>()) - if (P->getPointeeType()->getAs<TypedefType>()) - return false; - return QT->isFunctionPointerType() || QT->isBlockPointerType(); -} - bool Sema::isObjCPropertyDecl() { if (!ThisDeclInfo) return false; @@ -1173,16 +1107,15 @@ StringRef Sema::correctTypoInTParamReference( return StringRef(); } -InlineCommandComment::RenderKind -Sema::getInlineCommandRenderKind(StringRef Name) const { +InlineCommandRenderKind Sema::getInlineCommandRenderKind(StringRef Name) const { assert(Traits.getCommandInfo(Name)->IsInlineCommand); - return llvm::StringSwitch<InlineCommandComment::RenderKind>(Name) - .Case("b", InlineCommandComment::RenderBold) - .Cases("c", "p", InlineCommandComment::RenderMonospaced) - .Cases("a", "e", "em", InlineCommandComment::RenderEmphasized) - .Case("anchor", InlineCommandComment::RenderAnchor) - .Default(InlineCommandComment::RenderNormal); + return llvm::StringSwitch<InlineCommandRenderKind>(Name) + .Case("b", InlineCommandRenderKind::Bold) + .Cases("c", "p", InlineCommandRenderKind::Monospaced) + .Cases("a", "e", "em", InlineCommandRenderKind::Emphasized) + .Case("anchor", InlineCommandRenderKind::Anchor) + .Default(InlineCommandRenderKind::Normal); } } // end namespace comments diff --git a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp index 896050482644..28244104d663 100644 --- a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp +++ b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp @@ -17,10 +17,11 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/Type.h" #include "llvm/ADT/SmallVector.h" +#include <optional> using namespace clang; -Optional<ComparisonCategoryType> +std::optional<ComparisonCategoryType> clang::getComparisonCategoryForBuiltinCmp(QualType T) { using CCT = ComparisonCategoryType; @@ -37,7 +38,7 @@ clang::getComparisonCategoryForBuiltinCmp(QualType T) { return CCT::StrongOrdering; // TODO: Extend support for operator<=> to ObjC types. - return llvm::None; + return std::nullopt; } bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const { @@ -47,7 +48,7 @@ bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const { // Before we attempt to get the value of the first field, ensure that we // actually have one (and only one) field. - auto *Record = VD->getType()->getAsCXXRecordDecl(); + const auto *Record = VD->getType()->getAsCXXRecordDecl(); if (std::distance(Record->field_begin(), Record->field_end()) != 1 || !Record->field_begin()->getType()->isIntegralOrEnumerationType()) return false; @@ -57,7 +58,7 @@ bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const { /// Attempt to determine the integer value used to represent the comparison /// category result by evaluating the initializer for the specified VarDecl as -/// a constant expression and retreiving the value of the class's first +/// a constant expression and retrieving the value of the class's first /// (and only) field. 
/// /// Note: The STL types are expected to have the form: @@ -97,13 +98,13 @@ static const NamespaceDecl *lookupStdNamespace(const ASTContext &Ctx, return StdNS; } -static CXXRecordDecl *lookupCXXRecordDecl(const ASTContext &Ctx, - const NamespaceDecl *StdNS, - ComparisonCategoryType Kind) { +static const CXXRecordDecl *lookupCXXRecordDecl(const ASTContext &Ctx, + const NamespaceDecl *StdNS, + ComparisonCategoryType Kind) { StringRef Name = ComparisonCategories::getCategoryString(Kind); DeclContextLookupResult Lookup = StdNS->lookup(&Ctx.Idents.get(Name)); if (!Lookup.empty()) - if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Lookup.front())) + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Lookup.front())) return RD; return nullptr; } @@ -115,7 +116,7 @@ ComparisonCategories::lookupInfo(ComparisonCategoryType Kind) const { return &It->second; if (const NamespaceDecl *NS = lookupStdNamespace(Ctx, StdNS)) - if (CXXRecordDecl *RD = lookupCXXRecordDecl(Ctx, NS, Kind)) + if (const CXXRecordDecl *RD = lookupCXXRecordDecl(Ctx, NS, Kind)) return &Data.try_emplace((char)Kind, Ctx, RD, Kind).first->second; return nullptr; @@ -125,13 +126,13 @@ const ComparisonCategoryInfo * ComparisonCategories::lookupInfoForType(QualType Ty) const { assert(!Ty.isNull() && "type must be non-null"); using CCT = ComparisonCategoryType; - auto *RD = Ty->getAsCXXRecordDecl(); + const auto *RD = Ty->getAsCXXRecordDecl(); if (!RD) return nullptr; // Check to see if we have information for the specified type cached. const auto *CanonRD = RD->getCanonicalDecl(); - for (auto &KV : Data) { + for (const auto &KV : Data) { const ComparisonCategoryInfo &Info = KV.second; if (CanonRD == Info.Record->getCanonicalDecl()) return &Info; diff --git a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp index 5648cf2103d6..62ca15ea398f 100644 --- a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp +++ b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp @@ -26,7 +26,7 @@ ExprDependence clang::computeDependence(FullExpr *E) { } ExprDependence clang::computeDependence(OpaqueValueExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()); + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); if (auto *S = E->getSourceExpr()) D |= S->getDependence(); assert(!(D & ExprDependence::UnexpandedPack)); @@ -39,8 +39,10 @@ ExprDependence clang::computeDependence(ParenExpr *E) { ExprDependence clang::computeDependence(UnaryOperator *E, const ASTContext &Ctx) { - ExprDependence Dep = toExprDependence(E->getType()->getDependence()) | - E->getSubExpr()->getDependence(); + ExprDependence Dep = + // FIXME: Do we need to look at the type? + toExprDependenceForImpliedType(E->getType()->getDependence()) | + E->getSubExpr()->getDependence(); // C++ [temp.dep.constexpr]p5: // An expression of the form & qualified-id where the qualified-id names a @@ -77,7 +79,7 @@ ExprDependence clang::computeDependence(UnaryExprOrTypeTraitExpr *E) { // Value-dependent if the argument is type-dependent. 
if (E->isArgumentType()) return turnTypeToValueDependence( - toExprDependence(E->getArgumentType()->getDependence())); + toExprDependenceAsWritten(E->getArgumentType()->getDependence())); auto ArgDeps = E->getArgumentExpr()->getDependence(); auto Deps = ArgDeps & ~ExprDependence::TypeValue; @@ -120,21 +122,36 @@ ExprDependence clang::computeDependence(MatrixSubscriptExpr *E) { } ExprDependence clang::computeDependence(CompoundLiteralExpr *E) { - return toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()) | + return toExprDependenceAsWritten( + E->getTypeSourceInfo()->getType()->getDependence()) | + toExprDependenceForImpliedType(E->getType()->getDependence()) | turnTypeToValueDependence(E->getInitializer()->getDependence()); } -ExprDependence clang::computeDependence(CastExpr *E) { +ExprDependence clang::computeDependence(ImplicitCastExpr *E) { + // We model implicit conversions as combining the dependence of their + // subexpression, apart from its type, with the semantic portion of the + // target type. + ExprDependence D = + toExprDependenceForImpliedType(E->getType()->getDependence()); + if (auto *S = E->getSubExpr()) + D |= S->getDependence() & ~ExprDependence::Type; + return D; +} + +ExprDependence clang::computeDependence(ExplicitCastExpr *E) { // Cast expressions are type-dependent if the type is // dependent (C++ [temp.dep.expr]p3). // Cast expressions are value-dependent if the type is // dependent or if the subexpression is value-dependent. - auto D = toExprDependence(E->getType()->getDependence()); - if (E->getStmtClass() == Stmt::ImplicitCastExprClass) { - // An implicit cast expression doesn't (lexically) contain an - // unexpanded pack, even if its target type does. - D &= ~ExprDependence::UnexpandedPack; - } + // + // Note that we also need to consider the dependence of the actual type here, + // because when the type as written is a deduced type, that type is not + // dependent, but it may be deduced as a dependent type. + ExprDependence D = + toExprDependenceAsWritten( + cast<ExplicitCastExpr>(E)->getTypeAsWritten()->getDependence()) | + toExprDependenceForImpliedType(E->getType()->getDependence()); if (auto *S = E->getSubExpr()) D |= S->getDependence() & ~ExprDependence::Type; return D; @@ -158,7 +175,7 @@ ExprDependence clang::computeDependence(BinaryConditionalOperator *E) { } ExprDependence clang::computeDependence(StmtExpr *E, unsigned TemplateDepth) { - auto D = toExprDependence(E->getType()->getDependence()); + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); // Propagate dependence of the result. 
if (const auto *CompoundExprResult = dyn_cast_or_null<ValueStmt>(E->getSubStmt()->getStmtExprResult())) @@ -174,7 +191,8 @@ ExprDependence clang::computeDependence(StmtExpr *E, unsigned TemplateDepth) { } ExprDependence clang::computeDependence(ConvertVectorExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()) | + auto D = toExprDependenceAsWritten( + E->getTypeSourceInfo()->getType()->getDependence()) | E->getSrcExpr()->getDependence(); if (!E->getType()->isDependentType()) D &= ~ExprDependence::Type; @@ -206,14 +224,14 @@ ExprDependence clang::computeDependence(ParenListExpr *P) { } ExprDependence clang::computeDependence(VAArgExpr *E) { - auto D = - toExprDependence(E->getWrittenTypeInfo()->getType()->getDependence()) | - (E->getSubExpr()->getDependence() & ~ExprDependence::Type); - return D & ~ExprDependence::Value; + auto D = toExprDependenceAsWritten( + E->getWrittenTypeInfo()->getType()->getDependence()) | + (E->getSubExpr()->getDependence() & ~ExprDependence::Type); + return D; } ExprDependence clang::computeDependence(NoInitExpr *E) { - return toExprDependence(E->getType()->getDependence()) & + return toExprDependenceForImpliedType(E->getType()->getDependence()) & (ExprDependence::Instantiation | ExprDependence::Error); } @@ -226,7 +244,7 @@ ExprDependence clang::computeDependence(ArrayInitLoopExpr *E) { } ExprDependence clang::computeDependence(ImplicitValueInitExpr *E) { - return toExprDependence(E->getType()->getDependence()) & + return toExprDependenceForImpliedType(E->getType()->getDependence()) & ExprDependence::Instantiation; } @@ -235,14 +253,16 @@ ExprDependence clang::computeDependence(ExtVectorElementExpr *E) { } ExprDependence clang::computeDependence(BlockExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()); + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); if (E->getBlockDecl()->isDependentContext()) D |= ExprDependence::Instantiation; - return D & ~ExprDependence::UnexpandedPack; + return D; } ExprDependence clang::computeDependence(AsTypeExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()) | + // FIXME: AsTypeExpr doesn't store the type as written. Assume the expression + // type has identical sugar for now, so is a type-as-written. 
+ auto D = toExprDependenceAsWritten(E->getType()->getDependence()) | E->getSrcExpr()->getDependence(); if (!E->getType()->isDependentType()) D &= ~ExprDependence::Type; @@ -255,15 +275,14 @@ ExprDependence clang::computeDependence(CXXRewrittenBinaryOperator *E) { ExprDependence clang::computeDependence(CXXStdInitializerListExpr *E) { auto D = turnTypeToValueDependence(E->getSubExpr()->getDependence()); - D |= toExprDependence(E->getType()->getDependence()) & - (ExprDependence::Type | ExprDependence::Error); + D |= toExprDependenceForImpliedType(E->getType()->getDependence()); return D; } ExprDependence clang::computeDependence(CXXTypeidExpr *E) { auto D = ExprDependence::None; if (E->isTypeOperand()) - D = toExprDependence( + D = toExprDependenceAsWritten( E->getTypeOperandSourceInfo()->getType()->getDependence()); else D = turnTypeToValueDependence(E->getExprOperand()->getDependence()); @@ -281,7 +300,7 @@ ExprDependence clang::computeDependence(MSPropertySubscriptExpr *E) { ExprDependence clang::computeDependence(CXXUuidofExpr *E) { if (E->isTypeOperand()) - return turnTypeToValueDependence(toExprDependence( + return turnTypeToValueDependence(toExprDependenceAsWritten( E->getTypeOperandSourceInfo()->getType()->getDependence())); return turnTypeToValueDependence(E->getExprOperand()->getDependence()); @@ -290,7 +309,17 @@ ExprDependence clang::computeDependence(CXXUuidofExpr *E) { ExprDependence clang::computeDependence(CXXThisExpr *E) { // 'this' is type-dependent if the class type of the enclosing // member function is dependent (C++ [temp.dep.expr]p2) - auto D = toExprDependence(E->getType()->getDependence()); + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); + + // If a lambda with an explicit object parameter captures '*this', then + // 'this' now refers to the captured copy of lambda, and if the lambda + // is type-dependent, so is the object and thus 'this'. + // + // Note: The standard does not mention this case explicitly, but we need + // to do this so we can mark NSDM accesses as dependent. 
+ if (E->isCapturedByCopyInLambdaWithExplicitObjectParameter()) + D |= ExprDependence::Type; + assert(!(D & ExprDependence::UnexpandedPack)); return D; } @@ -307,8 +336,10 @@ ExprDependence clang::computeDependence(CXXBindTemporaryExpr *E) { } ExprDependence clang::computeDependence(CXXScalarValueInitExpr *E) { - return toExprDependence(E->getType()->getDependence()) & - ~ExprDependence::TypeValue; + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); + if (auto *TSI = E->getTypeSourceInfo()) + D |= toExprDependenceAsWritten(TSI->getType()->getDependence()); + return D; } ExprDependence clang::computeDependence(CXXDeleteExpr *E) { @@ -316,7 +347,7 @@ ExprDependence clang::computeDependence(CXXDeleteExpr *E) { } ExprDependence clang::computeDependence(ArrayTypeTraitExpr *E) { - auto D = toExprDependence(E->getQueriedType()->getDependence()); + auto D = toExprDependenceAsWritten(E->getQueriedType()->getDependence()); if (auto *Dim = E->getDimensionExpression()) D |= Dim->getDependence(); return turnTypeToValueDependence(D); @@ -343,6 +374,28 @@ ExprDependence clang::computeDependence(PackExpansionExpr *E) { ExprDependence::TypeValueInstantiation; } +ExprDependence clang::computeDependence(PackIndexingExpr *E) { + + ExprDependence PatternDep = E->getPackIdExpression()->getDependence() & + ~ExprDependence::UnexpandedPack; + + ExprDependence D = E->getIndexExpr()->getDependence(); + if (D & ExprDependence::TypeValueInstantiation) + D |= E->getIndexExpr()->getDependence() | PatternDep | + ExprDependence::Instantiation; + + ArrayRef<Expr *> Exprs = E->getExpressions(); + if (Exprs.empty()) + D |= PatternDep | ExprDependence::Instantiation; + + else if (!E->getIndexExpr()->isInstantiationDependent()) { + std::optional<unsigned> Index = E->getSelectedIndex(); + assert(Index && *Index < Exprs.size() && "pack index out of bound"); + D |= Exprs[*Index]->getDependence(); + } + return D; +} + ExprDependence clang::computeDependence(SubstNonTypeTemplateParmExpr *E) { return E->getReplacement()->getDependence(); } @@ -366,7 +419,7 @@ ExprDependence clang::computeDependence(ObjCBoxedExpr *E) { } ExprDependence clang::computeDependence(ObjCEncodeExpr *E) { - return toExprDependence(E->getEncodedType()->getDependence()); + return toExprDependenceAsWritten(E->getEncodedType()->getDependence()); } ExprDependence clang::computeDependence(ObjCIvarRefExpr *E) { @@ -377,7 +430,8 @@ ExprDependence clang::computeDependence(ObjCPropertyRefExpr *E) { if (E->isObjectReceiver()) return E->getBase()->getDependence() & ~ExprDependence::Type; if (E->isSuperReceiver()) - return toExprDependence(E->getSuperReceiverType()->getDependence()) & + return toExprDependenceForImpliedType( + E->getSuperReceiverType()->getDependence()) & ~ExprDependence::TypeValue; assert(E->isClassReceiver()); return ExprDependence::None; @@ -396,29 +450,37 @@ ExprDependence clang::computeDependence(ObjCIndirectCopyRestoreExpr *E) { return E->getSubExpr()->getDependence(); } -ExprDependence clang::computeDependence(OMPArraySectionExpr *E) { +ExprDependence clang::computeDependence(ArraySectionExpr *E) { auto D = E->getBase()->getDependence(); if (auto *LB = E->getLowerBound()) D |= LB->getDependence(); if (auto *Len = E->getLength()) D |= Len->getDependence(); + + if (E->isOMPArraySection()) { + if (auto *Stride = E->getStride()) + D |= Stride->getDependence(); + } return D; } ExprDependence clang::computeDependence(OMPArrayShapingExpr *E) { - auto D = E->getBase()->getDependence() | - 
toExprDependence(E->getType()->getDependence()); + auto D = E->getBase()->getDependence(); for (Expr *Dim: E->getDimensions()) if (Dim) - D |= Dim->getDependence(); + D |= turnValueToTypeDependence(Dim->getDependence()); return D; } ExprDependence clang::computeDependence(OMPIteratorExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()); + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) { - if (auto *VD = cast_or_null<ValueDecl>(E->getIteratorDecl(I))) - D |= toExprDependence(VD->getType()->getDependence()); + if (auto *DD = cast_or_null<DeclaratorDecl>(E->getIteratorDecl(I))) { + // If the type is omitted, it's 'int', and is not dependent in any way. + if (auto *TSI = DD->getTypeSourceInfo()) { + D |= toExprDependenceAsWritten(TSI->getType()->getDependence()); + } + } OMPIteratorExpr::IteratorRange IR = E->getIteratorRange(I); if (Expr *BE = IR.Begin) D |= BE->getDependence(); @@ -451,22 +513,32 @@ ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) { if (Decl->isParameterPack()) Deps |= ExprDependence::UnexpandedPack; - Deps |= toExprDependence(Type->getDependence()) & ExprDependence::Error; + Deps |= toExprDependenceForImpliedType(Type->getDependence()) & + ExprDependence::Error; // C++ [temp.dep.expr]p3: // An id-expression is type-dependent if it contains: // - an identifier associated by name lookup with one or more declarations // declared with a dependent type + // - an identifier associated by name lookup with an entity captured by + // copy ([expr.prim.lambda.capture]) + // in a lambda-expression that has an explicit object parameter whose + // type is dependent ([dcl.fct]), // // [The "or more" case is not modeled as a DeclRefExpr. There are a bunch // more bullets here that we handle by treating the declaration as having a // dependent type if they involve a placeholder type that can't be deduced.] 
if (Type->isDependentType()) - return Deps | ExprDependence::TypeValueInstantiation; + Deps |= ExprDependence::TypeValueInstantiation; else if (Type->isInstantiationDependentType()) Deps |= ExprDependence::Instantiation; + // - an identifier associated by name lookup with an entity captured by + // copy ([expr.prim.lambda.capture]) + if (E->isCapturedByCopyInLambdaWithExplicitObjectParameter()) + Deps |= ExprDependence::Type; + // - a conversion-function-id that specifies a dependent type if (Decl->getDeclName().getNameKind() == DeclarationName::CXXConversionFunctionName) { @@ -499,13 +571,13 @@ ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) { // - it names a potentially-constant variable that is initialized with an // expression that is value-dependent if (const auto *Var = dyn_cast<VarDecl>(Decl)) { - if (Var->mightBeUsableInConstantExpressions(Ctx)) { - if (const Expr *Init = Var->getAnyInitializer()) { - if (Init->isValueDependent()) - Deps |= ExprDependence::ValueInstantiation; - if (Init->containsErrors()) - Deps |= ExprDependence::Error; - } + if (const Expr *Init = Var->getAnyInitializer()) { + if (Init->containsErrors()) + Deps |= ExprDependence::Error; + + if (Var->mightBeUsableInConstantExpressions(Ctx) && + Init->isValueDependent()) + Deps |= ExprDependence::ValueInstantiation; } // - it names a static data member that is a dependent member of the @@ -547,7 +619,7 @@ ExprDependence clang::computeDependence(RecoveryExpr *E) { // - type-dependent if we don't know the type (fallback to an opaque // dependent type), or the type is known and dependent, or it has // type-dependent subexpressions. - auto D = toExprDependence(E->getType()->getDependence()) | + auto D = toExprDependenceAsWritten(E->getType()->getDependence()) | ExprDependence::ErrorDependent; // FIXME: remove the type-dependent bit from subexpressions, if the // RecoveryExpr has a non-dependent type. 
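
// Illustrative C++23 sketch (not part of the patch) of the case the
// isCapturedByCopyInLambdaWithExplicitObjectParameter() checks above address:
// in a lambda that captures '*this' by copy and declares an explicit object
// parameter, 'this' and member accesses resolve against the captured copy
// through the (deduced, hence dependent) object parameter. 'Widget' and
// 'use' are made-up names for illustration only.
struct Widget {
  int n = 0;
  auto getter() {
    // 'n' refers to the member of the captured copy; because 'self' has a
    // deduced type, the access is treated as type-dependent until the call
    // operator is instantiated.
    return [*this](this auto &&self) { return n; };
  }
};
int use() { return Widget{}.getter()(); } // deduces 'self' as the closure type
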
@@ -557,18 +629,20 @@ ExprDependence clang::computeDependence(RecoveryExpr *E) { } ExprDependence clang::computeDependence(SYCLUniqueStableNameExpr *E) { - return toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()); + return toExprDependenceAsWritten( + E->getTypeSourceInfo()->getType()->getDependence()); } ExprDependence clang::computeDependence(PredefinedExpr *E) { - return toExprDependence(E->getType()->getDependence()) & - ~ExprDependence::UnexpandedPack; + return toExprDependenceForImpliedType(E->getType()->getDependence()); } ExprDependence clang::computeDependence(CallExpr *E, llvm::ArrayRef<Expr *> PreArgs) { auto D = E->getCallee()->getDependence(); - for (auto *A : llvm::makeArrayRef(E->getArgs(), E->getNumArgs())) { + if (E->getType()->isDependentType()) + D |= ExprDependence::Type; + for (auto *A : llvm::ArrayRef(E->getArgs(), E->getNumArgs())) { if (A) D |= A->getDependence(); } @@ -578,16 +652,34 @@ ExprDependence clang::computeDependence(CallExpr *E, } ExprDependence clang::computeDependence(OffsetOfExpr *E) { - auto D = turnTypeToValueDependence( - toExprDependence(E->getTypeSourceInfo()->getType()->getDependence())); + auto D = turnTypeToValueDependence(toExprDependenceAsWritten( + E->getTypeSourceInfo()->getType()->getDependence())); for (unsigned I = 0, N = E->getNumExpressions(); I < N; ++I) D |= turnTypeToValueDependence(E->getIndexExpr(I)->getDependence()); return D; } +static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) { + auto D = ExprDependence::None; + if (Name.isInstantiationDependent()) + D |= ExprDependence::Instantiation; + if (Name.containsUnexpandedParameterPack()) + D |= ExprDependence::UnexpandedPack; + return D; +} + ExprDependence clang::computeDependence(MemberExpr *E) { - auto *MemberDecl = E->getMemberDecl(); auto D = E->getBase()->getDependence(); + D |= getDependenceInExpr(E->getMemberNameInfo()); + + if (auto *NNS = E->getQualifier()) + D |= toExprDependence(NNS->getDependence() & + ~NestedNameSpecifierDependence::Dependent); + + for (const auto &A : E->template_arguments()) + D |= toExprDependence(A.getArgument().getDependence()); + + auto *MemberDecl = E->getMemberDecl(); if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) { DeclContext *DC = MemberDecl->getDeclContext(); // dyn_cast_or_null is used to handle objC variables which do not @@ -603,7 +695,6 @@ ExprDependence clang::computeDependence(MemberExpr *E) { D |= ExprDependence::Type; } } - // FIXME: move remaining dependence computation from MemberExpr::Create() return D; } @@ -615,8 +706,8 @@ ExprDependence clang::computeDependence(InitListExpr *E) { } ExprDependence clang::computeDependence(ShuffleVectorExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()); - for (auto *C : llvm::makeArrayRef(E->getSubExprs(), E->getNumSubExprs())) + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); + for (auto *C : llvm::ArrayRef(E->getSubExprs(), E->getNumSubExprs())) D |= C->getDependence(); return D; } @@ -627,7 +718,12 @@ ExprDependence clang::computeDependence(GenericSelectionExpr *E, : ExprDependence::None; for (auto *AE : E->getAssocExprs()) D |= AE->getDependence() & ExprDependence::Error; - D |= E->getControllingExpr()->getDependence() & ExprDependence::Error; + + if (E->isExprPredicate()) + D |= E->getControllingExpr()->getDependence() & ExprDependence::Error; + else + D |= toExprDependenceAsWritten( + E->getControllingType()->getType()->getDependence()); if (E->isResultDependent()) return D | 
ExprDependence::TypeValueInstantiation; @@ -637,7 +733,7 @@ ExprDependence clang::computeDependence(GenericSelectionExpr *E, ExprDependence clang::computeDependence(DesignatedInitExpr *E) { auto Deps = E->getInit()->getDependence(); - for (auto D : E->designators()) { + for (const auto &D : E->designators()) { auto DesignatorDeps = ExprDependence::None; if (D.isArrayDesignator()) DesignatorDeps |= E->getArrayIndex(D)->getDependence(); @@ -660,15 +756,17 @@ ExprDependence clang::computeDependence(PseudoObjectExpr *O) { ExprDependence clang::computeDependence(AtomicExpr *A) { auto D = ExprDependence::None; - for (auto *E : llvm::makeArrayRef(A->getSubExprs(), A->getNumSubExprs())) + for (auto *E : llvm::ArrayRef(A->getSubExprs(), A->getNumSubExprs())) D |= E->getDependence(); return D; } ExprDependence clang::computeDependence(CXXNewExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()); + auto D = toExprDependenceAsWritten( + E->getAllocatedTypeSourceInfo()->getType()->getDependence()); + D |= toExprDependenceForImpliedType(E->getAllocatedType()->getDependence()); auto Size = E->getArraySize(); - if (Size.hasValue() && *Size) + if (Size && *Size) D |= turnTypeToValueDependence((*Size)->getDependence()); if (auto *I = E->getInitializer()) D |= turnTypeToValueDependence(I->getDependence()); @@ -679,26 +777,17 @@ ExprDependence clang::computeDependence(CXXNewExpr *E) { ExprDependence clang::computeDependence(CXXPseudoDestructorExpr *E) { auto D = E->getBase()->getDependence(); - if (!E->getDestroyedType().isNull()) - D |= toExprDependence(E->getDestroyedType()->getDependence()); + if (auto *TSI = E->getDestroyedTypeInfo()) + D |= toExprDependenceAsWritten(TSI->getType()->getDependence()); if (auto *ST = E->getScopeTypeInfo()) D |= turnTypeToValueDependence( - toExprDependence(ST->getType()->getDependence())); + toExprDependenceAsWritten(ST->getType()->getDependence())); if (auto *Q = E->getQualifier()) D |= toExprDependence(Q->getDependence() & ~NestedNameSpecifierDependence::Dependent); return D; } -static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) { - auto D = ExprDependence::None; - if (Name.isInstantiationDependent()) - D |= ExprDependence::Instantiation; - if (Name.containsUnexpandedParameterPack()) - D |= ExprDependence::UnexpandedPack; - return D; -} - ExprDependence clang::computeDependence(OverloadExpr *E, bool KnownDependent, bool KnownInstantiationDependent, @@ -722,7 +811,7 @@ clang::computeDependence(OverloadExpr *E, bool KnownDependent, // If we have explicit template arguments, check for dependent // template arguments and whether they contain any unexpanded pack // expansions. 
- for (auto A : E->template_arguments()) + for (const auto &A : E->template_arguments()) Deps |= toExprDependence(A.getArgument().getDependence()); return Deps; } @@ -732,18 +821,26 @@ ExprDependence clang::computeDependence(DependentScopeDeclRefExpr *E) { D |= getDependenceInExpr(E->getNameInfo()); if (auto *Q = E->getQualifier()) D |= toExprDependence(Q->getDependence()); - for (auto A : E->template_arguments()) + for (const auto &A : E->template_arguments()) D |= toExprDependence(A.getArgument().getDependence()); return D; } ExprDependence clang::computeDependence(CXXConstructExpr *E) { - auto D = toExprDependence(E->getType()->getDependence()); + ExprDependence D = + toExprDependenceForImpliedType(E->getType()->getDependence()); for (auto *A : E->arguments()) D |= A->getDependence() & ~ExprDependence::Type; return D; } +ExprDependence clang::computeDependence(CXXTemporaryObjectExpr *E) { + CXXConstructExpr *BaseE = E; + return toExprDependenceAsWritten( + E->getTypeSourceInfo()->getType()->getDependence()) | + computeDependence(BaseE); +} + ExprDependence clang::computeDependence(CXXDefaultInitExpr *E) { return E->getExpr()->getDependence(); } @@ -754,7 +851,7 @@ ExprDependence clang::computeDependence(CXXDefaultArgExpr *E) { ExprDependence clang::computeDependence(LambdaExpr *E, bool ContainsUnexpandedParameterPack) { - auto D = toExprDependence(E->getType()->getDependence()); + auto D = toExprDependenceForImpliedType(E->getType()->getDependence()); if (ContainsUnexpandedParameterPack) D |= ExprDependence::UnexpandedPack; return D; @@ -762,7 +859,8 @@ ExprDependence clang::computeDependence(LambdaExpr *E, ExprDependence clang::computeDependence(CXXUnresolvedConstructExpr *E) { auto D = ExprDependence::ValueInstantiation; - D |= toExprDependence(E->getType()->getDependence()); + D |= toExprDependenceAsWritten(E->getTypeAsWritten()->getDependence()); + D |= toExprDependenceForImpliedType(E->getType()->getDependence()); for (auto *A : E->arguments()) D |= A->getDependence() & (ExprDependence::UnexpandedPack | ExprDependence::Error); @@ -776,7 +874,7 @@ ExprDependence clang::computeDependence(CXXDependentScopeMemberExpr *E) { if (auto *Q = E->getQualifier()) D |= toExprDependence(Q->getDependence()); D |= getDependenceInExpr(E->getMemberNameInfo()); - for (auto A : E->template_arguments()) + for (const auto &A : E->template_arguments()) D |= toExprDependence(A.getArgument().getDependence()); return D; } @@ -794,11 +892,18 @@ ExprDependence clang::computeDependence(CXXFoldExpr *E) { return D; } +ExprDependence clang::computeDependence(CXXParenListInitExpr *E) { + auto D = ExprDependence::None; + for (const auto *A : E->getInitExprs()) + D |= A->getDependence(); + return D; +} + ExprDependence clang::computeDependence(TypeTraitExpr *E) { auto D = ExprDependence::None; for (const auto *A : E->getArgs()) - D |= - toExprDependence(A->getType()->getDependence()) & ~ExprDependence::Type; + D |= toExprDependenceAsWritten(A->getType()->getDependence()) & + ~ExprDependence::Type; return D; } @@ -816,7 +921,10 @@ ExprDependence clang::computeDependence(ConceptSpecializationExpr *E, ExprDependence D = ValueDependent ? 
ExprDependence::Value : ExprDependence::None; - return D | toExprDependence(TA); + auto Res = D | toExprDependence(TA); + if(!ValueDependent && E->getSatisfaction().ContainsErrors) + Res |= ExprDependence::Error; + return Res; } ExprDependence clang::computeDependence(ObjCArrayLiteral *E) { @@ -845,7 +953,7 @@ ExprDependence clang::computeDependence(ObjCMessageExpr *E) { if (auto *R = E->getInstanceReceiver()) D |= R->getDependence(); else - D |= toExprDependence(E->getType()->getDependence()); + D |= toExprDependenceForImpliedType(E->getType()->getDependence()); for (auto *A : E->arguments()) D |= A->getDependence(); return D; diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp index 959a7c415c58..bc7cce0bcd7f 100644 --- a/contrib/llvm-project/clang/lib/AST/Decl.cpp +++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp @@ -30,6 +30,8 @@ #include "clang/AST/ODRHash.h" #include "clang/AST/PrettyDeclStackTrace.h" #include "clang/AST/PrettyPrinter.h" +#include "clang/AST/Randstruct.h" +#include "clang/AST/RecordLayout.h" #include "clang/AST/Redeclarable.h" #include "clang/AST/Stmt.h" #include "clang/AST/TemplateBase.h" @@ -52,21 +54,20 @@ #include "clang/Basic/Visibility.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/None.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSwitch.h" -#include "llvm/ADT/Triple.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/TargetParser/Triple.h" #include <algorithm> #include <cassert> #include <cstddef> #include <cstring> #include <memory> +#include <optional> #include <string> #include <tuple> #include <type_traits> @@ -86,7 +87,7 @@ void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const { } OS << Message; - if (auto *ND = dyn_cast_or_null<NamedDecl>(TheDecl)) { + if (auto *ND = dyn_cast_if_present<NamedDecl>(TheDecl)) { OS << " '"; ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), true); OS << "'"; @@ -168,8 +169,8 @@ withExplicitVisibilityAlready(LVComputationKind Kind) { return Kind; } -static Optional<Visibility> getExplicitVisibility(const NamedDecl *D, - LVComputationKind kind) { +static std::optional<Visibility> getExplicitVisibility(const NamedDecl *D, + LVComputationKind kind) { assert(!kind.IgnoreExplicitVisibility && "asking for explicit visibility when we shouldn't be"); return D->getExplicitVisibility(kind.getExplicitVisibilityKind()); @@ -185,8 +186,8 @@ static bool usesTypeVisibility(const NamedDecl *D) { /// Does the given declaration have member specialization information, /// and if so, is it an explicit specialization? -template <class T> static typename -std::enable_if<!std::is_base_of<RedeclarableTemplateDecl, T>::value, bool>::type +template <class T> +static std::enable_if_t<!std::is_base_of_v<RedeclarableTemplateDecl, T>, bool> isExplicitMemberSpecialization(const T *D) { if (const MemberSpecializationInfo *member = D->getMemberSpecializationInfo()) { @@ -218,8 +219,8 @@ static Visibility getVisibilityFromAttr(const T *attr) { } /// Return the explicit visibility of the given declaration. 
-static Optional<Visibility> getVisibilityOf(const NamedDecl *D, - NamedDecl::ExplicitVisibilityKind kind) { +static std::optional<Visibility> +getVisibilityOf(const NamedDecl *D, NamedDecl::ExplicitVisibilityKind kind) { // If we're ultimately computing the visibility of a type, look for // a 'type_visibility' attribute before looking for 'visibility'. if (kind == NamedDecl::VisibilityForType) { @@ -233,7 +234,7 @@ static Optional<Visibility> getVisibilityOf(const NamedDecl *D, return getVisibilityFromAttr(A); } - return None; + return std::nullopt; } LinkageInfo LinkageComputer::getLVForType(const Type &T, @@ -342,6 +343,10 @@ LinkageComputer::getLVForTemplateArgumentList(ArrayRef<TemplateArgument> Args, LV.merge(getTypeLinkageAndVisibility(Arg.getNullPtrType())); continue; + case TemplateArgument::StructuralValue: + LV.merge(getLVForValue(Arg.getAsStructuralValue(), computation)); + continue; + case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: if (TemplateDecl *Template = @@ -391,11 +396,17 @@ void LinkageComputer::mergeTemplateLV( bool considerVisibility = shouldConsiderTemplateVisibility(fn, specInfo); - // Merge information from the template parameters. FunctionTemplateDecl *temp = specInfo->getTemplate(); - LinkageInfo tempLV = - getLVForTemplateParameterList(temp->getTemplateParameters(), computation); - LV.mergeMaybeWithVisibility(tempLV, considerVisibility); + // Merge information from the template declaration. + LinkageInfo tempLV = getLVForDecl(temp, computation); + // The linkage of the specialization should be consistent with the + // template declaration. + LV.setLinkage(tempLV.getLinkage()); + + // Merge information from the template parameters. + LinkageInfo paramsLV = + getLVForTemplateParameterList(temp->getTemplateParameters(), computation); + LV.mergeMaybeWithVisibility(paramsLV, considerVisibility); // Merge information from the template arguments. const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments; @@ -459,11 +470,16 @@ void LinkageComputer::mergeTemplateLV( // Merge information from the template parameters, but ignore // visibility if we're only considering template arguments. - ClassTemplateDecl *temp = spec->getSpecializedTemplate(); - LinkageInfo tempLV = + // Merge information from the template declaration. + LinkageInfo tempLV = getLVForDecl(temp, computation); + // The linkage of the specialization should be consistent with the + // template declaration. + LV.setLinkage(tempLV.getLinkage()); + + LinkageInfo paramsLV = getLVForTemplateParameterList(temp->getTemplateParameters(), computation); - LV.mergeMaybeWithVisibility(tempLV, + LV.mergeMaybeWithVisibility(paramsLV, considerVisibility && !hasExplicitVisibilityAlready(computation)); // Merge information from the template arguments. We ignore @@ -511,7 +527,6 @@ void LinkageComputer::mergeTemplateLV(LinkageInfo &LV, // Merge information from the template parameters, but ignore // visibility if we're only considering template arguments. - VarTemplateDecl *temp = spec->getSpecializedTemplate(); LinkageInfo tempLV = getLVForTemplateParameterList(temp->getTemplateParameters(), computation); @@ -568,46 +583,13 @@ static bool isSingleLineLanguageLinkage(const Decl &D) { return false; } -/// Determine whether D is declared in the purview of a named module. 
-static bool isInModulePurview(const NamedDecl *D) { +static bool isDeclaredInModuleInterfaceOrPartition(const NamedDecl *D) { if (auto *M = D->getOwningModule()) - return M->isModulePurview(); + return M->isInterfaceOrPartition(); return false; } -static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) { - // FIXME: Handle isModulePrivate. - switch (D->getModuleOwnershipKind()) { - case Decl::ModuleOwnershipKind::Unowned: - case Decl::ModuleOwnershipKind::ModulePrivate: - return false; - case Decl::ModuleOwnershipKind::Visible: - case Decl::ModuleOwnershipKind::VisibleWhenImported: - return isInModulePurview(D); - } - llvm_unreachable("unexpected module ownership kind"); -} - -static LinkageInfo getInternalLinkageFor(const NamedDecl *D) { - // Internal linkage declarations within a module interface unit are modeled - // as "module-internal linkage", which means that they have internal linkage - // formally but can be indirectly accessed from outside the module via inline - // functions and templates defined within the module. - if (isInModulePurview(D)) - return LinkageInfo(ModuleInternalLinkage, DefaultVisibility, false); - - return LinkageInfo::internal(); -} - static LinkageInfo getExternalLinkageFor(const NamedDecl *D) { - // C++ Modules TS [basic.link]/6.8: - // - A name declared at namespace scope that does not have internal linkage - // by the previous rules and that is introduced by a non-exported - // declaration has module linkage. - if (isInModulePurview(D) && !isExportedFromModuleInterfaceUnit( - cast<NamedDecl>(D->getCanonicalDecl()))) - return LinkageInfo(ModuleLinkage, DefaultVisibility, false); - return LinkageInfo::external(); } @@ -630,30 +612,37 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, assert(D->getDeclContext()->getRedeclContext()->isFileContext() && "Not a name having namespace scope"); ASTContext &Context = D->getASTContext(); + const auto *Var = dyn_cast<VarDecl>(D); // C++ [basic.link]p3: // A name having namespace scope (3.3.6) has internal linkage if it // is the name of - if (getStorageClass(D->getCanonicalDecl()) == SC_Static) { + if ((getStorageClass(D->getCanonicalDecl()) == SC_Static) || + (Context.getLangOpts().C23 && Var && Var->isConstexpr())) { // - a variable, variable template, function, or function template // that is explicitly declared static; or // (This bullet corresponds to C99 6.2.2p3.) - return getInternalLinkageFor(D); + + // C23 6.2.2p3 + // If the declaration of a file scope identifier for + // an object contains any of the storage-class specifiers static or + // constexpr then the identifier has internal linkage. + return LinkageInfo::internal(); } - if (const auto *Var = dyn_cast<VarDecl>(D)) { + if (Var) { // - a non-template variable of non-volatile const-qualified type, unless // - it is explicitly declared extern, or - // - it is inline or exported, or + // - it is declared in the purview of a module interface unit + // (outside the private-module-fragment, if any) or module partition, or + // - it is inline, or // - it was previously declared and the prior declaration did not have // internal linkage // (There is no equivalent in C99.) 
- if (Context.getLangOpts().CPlusPlus && - Var->getType().isConstQualified() && - !Var->getType().isVolatileQualified() && - !Var->isInline() && - !isExportedFromModuleInterfaceUnit(Var) && + if (Context.getLangOpts().CPlusPlus && Var->getType().isConstQualified() && + !Var->getType().isVolatileQualified() && !Var->isInline() && + !isDeclaredInModuleInterfaceOrPartition(Var) && !isa<VarTemplateSpecializationDecl>(Var) && !Var->getDescribedVarTemplate()) { const VarDecl *PrevVar = Var->getPreviousDecl(); @@ -663,7 +652,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, if (Var->getStorageClass() != SC_Extern && Var->getStorageClass() != SC_PrivateExtern && !isSingleLineLanguageLinkage(*Var)) - return getInternalLinkageFor(Var); + return LinkageInfo::internal(); } for (const VarDecl *PrevVar = Var->getPreviousDecl(); PrevVar; @@ -673,7 +662,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, return getDeclLinkageAndVisibility(PrevVar); // Explicitly declared static. if (PrevVar->getStorageClass() == SC_Static) - return getInternalLinkageFor(Var); + return LinkageInfo::internal(); } } else if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D)) { // - a data member of an anonymous union. @@ -697,7 +686,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, // within an unnamed namespace has internal linkage. if ((!Var || !isFirstInExternCContext(Var)) && (!Func || !isFirstInExternCContext(Func))) - return getInternalLinkageFor(D); + return LinkageInfo::internal(); } // Set up the defaults. @@ -709,7 +698,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, LinkageInfo LV = getExternalLinkageFor(D); if (!hasExplicitVisibilityAlready(computation)) { - if (Optional<Visibility> Vis = getExplicitVisibility(D, computation)) { + if (std::optional<Visibility> Vis = getExplicitVisibility(D, computation)) { LV.mergeVisibility(*Vis, true); } else { // If we're declared in a namespace with a visibility attribute, @@ -719,7 +708,8 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, DC = DC->getParent()) { const auto *ND = dyn_cast<NamespaceDecl>(DC); if (!ND) continue; - if (Optional<Visibility> Vis = getExplicitVisibility(ND, computation)) { + if (std::optional<Visibility> Vis = + getExplicitVisibility(ND, computation)) { LV.mergeVisibility(*Vis, true); break; } @@ -780,6 +770,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, // // Note that we don't want to make the variable non-external // because of this, but unique-external linkage suits us. + if (Context.getLangOpts().CPlusPlus && !isFirstInExternCContext(Var) && !IgnoreVarTypeLinkage) { LinkageInfo TypeLV = getLVForType(*Var->getType(), computation); @@ -813,6 +804,16 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, if (Function->getStorageClass() == SC_PrivateExtern) LV.mergeVisibility(HiddenVisibility, true); + // OpenMP target declare device functions are not callable from the host so + // they should not be exported from the device image. This applies to all + // functions as the host-callable kernel functions are emitted at codegen. 
+ if (Context.getLangOpts().OpenMP && + Context.getLangOpts().OpenMPIsTargetDevice && + ((Context.getTargetInfo().getTriple().isAMDGPU() || + Context.getTargetInfo().getTriple().isNVPTX()) || + OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Function))) + LV.mergeVisibility(HiddenVisibility, /*newExplicit=*/false); + // Note that Sema::MergeCompatibleFunctionDecls already takes care of // merging storage classes and visibility attributes, so we don't have to // look at previous decls in here. @@ -906,10 +907,6 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D, if (!isExternallyVisible(LV.getLinkage())) return LinkageInfo(LV.getLinkage(), DefaultVisibility, false); - // Mark the symbols as hidden when compiling for the device. - if (Context.getLangOpts().OpenMP && Context.getLangOpts().OpenMPIsDevice) - LV.mergeVisibility(HiddenVisibility, /*newExplicit=*/false); - return LV; } @@ -939,7 +936,7 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D, // If we have an explicit visibility attribute, merge that in. if (!hasExplicitVisibilityAlready(computation)) { - if (Optional<Visibility> Vis = getExplicitVisibility(D, computation)) + if (std::optional<Visibility> Vis = getExplicitVisibility(D, computation)) LV.mergeVisibility(*Vis, true); // If we're paying attention to global visibility, apply // -finline-visibility-hidden if this is an inline method. @@ -995,6 +992,17 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D, explicitSpecSuppressor = MD; } + // OpenMP target declare device functions are not callable from the host so + // they should not be exported from the device image. This applies to all + // functions as the host-callable kernel functions are emitted at codegen. + ASTContext &Context = D->getASTContext(); + if (Context.getLangOpts().OpenMP && + Context.getLangOpts().OpenMPIsTargetDevice && + ((Context.getTargetInfo().getTriple().isAMDGPU() || + Context.getTargetInfo().getTriple().isNVPTX()) || + OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(MD))) + LV.mergeVisibility(HiddenVisibility, /*newExplicit=*/false); + } else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) { if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(RD)) { mergeTemplateLV(LV, spec, computation); @@ -1078,6 +1086,42 @@ bool NamedDecl::isLinkageValid() const { return L == getCachedLinkage(); } +bool NamedDecl::isPlaceholderVar(const LangOptions &LangOpts) const { + // [C++2c] [basic.scope.scope]/p5 + // A declaration is name-independent if its name is _ and it declares + // - a variable with automatic storage duration, + // - a structured binding not inhabiting a namespace scope, + // - the variable introduced by an init-capture + // - or a non-static data member. 
+ + if (!LangOpts.CPlusPlus || !getIdentifier() || + !getIdentifier()->isPlaceholder()) + return false; + if (isa<FieldDecl>(this)) + return true; + if (const auto *IFD = dyn_cast<IndirectFieldDecl>(this)) { + if (!getDeclContext()->isFunctionOrMethod() && + !getDeclContext()->isRecord()) + return false; + const VarDecl *VD = IFD->getVarDecl(); + return !VD || VD->getStorageDuration() == SD_Automatic; + } + // and it declares a variable with automatic storage duration + if (const auto *VD = dyn_cast<VarDecl>(this)) { + if (isa<ParmVarDecl>(VD)) + return false; + if (VD->isInitCapture()) + return true; + return VD->getStorageDuration() == StorageDuration::SD_Automatic; + } + if (const auto *BD = dyn_cast<BindingDecl>(this); + BD && getDeclContext()->isFunctionOrMethod()) { + const VarDecl *VD = BD->getHoldingVar(); + return !VD || VD->getStorageDuration() == StorageDuration::SD_Automatic; + } + return false; +} + ReservedIdentifierStatus NamedDecl::isReserved(const LangOptions &LangOpts) const { const IdentifierInfo *II = getIdentifier(); @@ -1088,13 +1132,29 @@ NamedDecl::isReserved(const LangOptions &LangOpts) const { return ReservedIdentifierStatus::NotReserved; ReservedIdentifierStatus Status = II->isReserved(LangOpts); - if (Status == ReservedIdentifierStatus::StartsWithUnderscoreAtGlobalScope) { - // Check if we're at TU level or not. + if (isReservedAtGlobalScope(Status) && !isReservedInAllContexts(Status)) { + // This name is only reserved at global scope. Check if this declaration + // conflicts with a global scope declaration. if (isa<ParmVarDecl>(this) || isTemplateParameter()) return ReservedIdentifierStatus::NotReserved; + + // C++ [dcl.link]/7: + // Two declarations [conflict] if [...] one declares a function or + // variable with C language linkage, and the other declares [...] a + // variable that belongs to the global scope. + // + // Therefore names that are reserved at global scope are also reserved as + // names of variables and functions with C language linkage. const DeclContext *DC = getDeclContext()->getRedeclContext(); - if (!DC->isTranslationUnit()) - return ReservedIdentifierStatus::NotReserved; + if (DC->isTranslationUnit()) + return Status; + if (auto *VD = dyn_cast<VarDecl>(this)) + if (VD->isExternC()) + return ReservedIdentifierStatus::StartsWithUnderscoreAndIsExternC; + if (auto *FD = dyn_cast<FunctionDecl>(this)) + if (FD->isExternC()) + return ReservedIdentifierStatus::StartsWithUnderscoreAndIsExternC; + return ReservedIdentifierStatus::NotReserved; } return Status; @@ -1121,18 +1181,54 @@ Linkage NamedDecl::getLinkageInternal() const { .getLinkage(); } +static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) { + // FIXME: Handle isModulePrivate. + switch (D->getModuleOwnershipKind()) { + case Decl::ModuleOwnershipKind::Unowned: + case Decl::ModuleOwnershipKind::ReachableWhenImported: + case Decl::ModuleOwnershipKind::ModulePrivate: + return false; + case Decl::ModuleOwnershipKind::Visible: + case Decl::ModuleOwnershipKind::VisibleWhenImported: + return D->isInNamedModule(); + } + llvm_unreachable("unexpected module ownership kind"); +} + +/// Get the linkage from a semantic point of view. Entities in +/// anonymous namespaces are external (in c++98). 
+Linkage NamedDecl::getFormalLinkage() const { + Linkage InternalLinkage = getLinkageInternal(); + + // C++ [basic.link]p4.8: + // - if the declaration of the name is attached to a named module and is not + // exported + // the name has module linkage; + // + // [basic.namespace.general]/p2 + // A namespace is never attached to a named module and never has a name with + // module linkage. + if (isInNamedModule() && InternalLinkage == Linkage::External && + !isExportedFromModuleInterfaceUnit( + cast<NamedDecl>(this->getCanonicalDecl())) && + !isa<NamespaceDecl>(this)) + InternalLinkage = Linkage::Module; + + return clang::getFormalLinkage(InternalLinkage); +} + LinkageInfo NamedDecl::getLinkageAndVisibility() const { return LinkageComputer{}.getDeclLinkageAndVisibility(this); } -static Optional<Visibility> +static std::optional<Visibility> getExplicitVisibilityAux(const NamedDecl *ND, NamedDecl::ExplicitVisibilityKind kind, bool IsMostRecent) { assert(!IsMostRecent || ND == ND->getMostRecentDecl()); // Check the declaration itself first. - if (Optional<Visibility> V = getVisibilityOf(ND, kind)) + if (std::optional<Visibility> V = getVisibilityOf(ND, kind)) return V; // If this is a member class of a specialization of a class template @@ -1152,11 +1248,11 @@ getExplicitVisibilityAux(const NamedDecl *ND, const auto *TD = spec->getSpecializedTemplate()->getTemplatedDecl(); while (TD != nullptr) { auto Vis = getVisibilityOf(TD, kind); - if (Vis != None) + if (Vis != std::nullopt) return Vis; TD = TD->getPreviousDecl(); } - return None; + return std::nullopt; } // Use the most recent declaration. @@ -1177,7 +1273,7 @@ getExplicitVisibilityAux(const NamedDecl *ND, return getVisibilityOf(VTSD->getSpecializedTemplate()->getTemplatedDecl(), kind); - return None; + return std::nullopt; } // Also handle function template specializations. if (const auto *fn = dyn_cast<FunctionDecl>(ND)) { @@ -1194,17 +1290,17 @@ getExplicitVisibilityAux(const NamedDecl *ND, if (InstantiatedFrom) return getVisibilityOf(InstantiatedFrom, kind); - return None; + return std::nullopt; } // The visibility of a template is stored in the templated decl. if (const auto *TD = dyn_cast<TemplateDecl>(ND)) return getVisibilityOf(TD->getTemplatedDecl(), kind); - return None; + return std::nullopt; } -Optional<Visibility> +std::optional<Visibility> NamedDecl::getExplicitVisibility(ExplicitVisibilityKind kind) const { return getExplicitVisibilityAux(this, kind, false); } @@ -1219,8 +1315,13 @@ LinkageInfo LinkageComputer::getLVForClosure(const DeclContext *DC, else if (isa<ParmVarDecl>(ContextDecl)) Owner = dyn_cast<NamedDecl>(ContextDecl->getDeclContext()->getRedeclContext()); - else + else if (isa<ImplicitConceptSpecializationDecl>(ContextDecl)) { + // Replace with the concept's owning decl, which is either a namespace or a + // TU, so this needs a dyn_cast. + Owner = dyn_cast<NamedDecl>(ContextDecl->getDeclContext()); + } else { Owner = cast<NamedDecl>(ContextDecl); + } if (!Owner) return LinkageInfo::none(); @@ -1239,7 +1340,7 @@ LinkageInfo LinkageComputer::getLVForClosure(const DeclContext *DC, // visible, then the lambda is too. We apply the same rules to blocks. 
if (!isExternallyVisible(OwnerLV.getLinkage())) return LinkageInfo::none(); - return LinkageInfo(VisibleNoLinkage, OwnerLV.getVisibility(), + return LinkageInfo(Linkage::VisibleNone, OwnerLV.getVisibility(), OwnerLV.isVisibilityExplicit()); } @@ -1248,15 +1349,15 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D, if (const auto *Function = dyn_cast<FunctionDecl>(D)) { if (Function->isInAnonymousNamespace() && !isFirstInExternCContext(Function)) - return getInternalLinkageFor(Function); + return LinkageInfo::internal(); // This is a "void f();" which got merged with a file static. if (Function->getCanonicalDecl()->getStorageClass() == SC_Static) - return getInternalLinkageFor(Function); + return LinkageInfo::internal(); LinkageInfo LV; if (!hasExplicitVisibilityAlready(computation)) { - if (Optional<Visibility> Vis = + if (std::optional<Visibility> Vis = getExplicitVisibility(Function, computation)) LV.mergeVisibility(*Vis, true); } @@ -1271,19 +1372,20 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D, if (const auto *Var = dyn_cast<VarDecl>(D)) { if (Var->hasExternalStorage()) { if (Var->isInAnonymousNamespace() && !isFirstInExternCContext(Var)) - return getInternalLinkageFor(Var); + return LinkageInfo::internal(); LinkageInfo LV; if (Var->getStorageClass() == SC_PrivateExtern) LV.mergeVisibility(HiddenVisibility, true); else if (!hasExplicitVisibilityAlready(computation)) { - if (Optional<Visibility> Vis = getExplicitVisibility(Var, computation)) + if (std::optional<Visibility> Vis = + getExplicitVisibility(Var, computation)) LV.mergeVisibility(*Vis, true); } if (const VarDecl *Prev = Var->getPreviousDecl()) { LinkageInfo PrevLV = getLVForDecl(Prev, computation); - if (PrevLV.getLinkage()) + if (PrevLV.getLinkage() != Linkage::Invalid) LV.setLinkage(PrevLV.getLinkage()); LV.mergeVisibility(PrevLV); } @@ -1334,14 +1436,14 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D, computation.isValueVisibility() ? Context.getLangOpts().getValueVisibilityMode() : Context.getLangOpts().getTypeVisibilityMode(); - return LinkageInfo(VisibleNoLinkage, globalVisibility, + return LinkageInfo(Linkage::VisibleNone, globalVisibility, /*visibilityExplicit=*/false); } } } if (!isExternallyVisible(LV.getLinkage())) return LinkageInfo::none(); - return LinkageInfo(VisibleNoLinkage, LV.getVisibility(), + return LinkageInfo(Linkage::VisibleNone, LV.getVisibility(), LV.isVisibilityExplicit()); } @@ -1350,7 +1452,7 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D, bool IgnoreVarTypeLinkage) { // Internal_linkage attribute overrides other considerations. if (D->hasAttr<InternalLinkageAttr>()) - return getInternalLinkageFor(D); + return LinkageInfo::internal(); // Objective-C: treat all Objective-C declarations as having external // linkage. @@ -1408,7 +1510,7 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D, if (Record->hasKnownLambdaInternalLinkage() || !Record->getLambdaManglingNumber()) { // This lambda has no mangling number, so it's internal. - return getInternalLinkageFor(D); + return LinkageInfo::internal(); } return getLVForClosure( @@ -1467,12 +1569,12 @@ LinkageInfo LinkageComputer::getLVForDecl(const NamedDecl *D, LVComputationKind computation) { // Internal_linkage attribute overrides other considerations. 
if (D->hasAttr<InternalLinkageAttr>()) - return getInternalLinkageFor(D); + return LinkageInfo::internal(); if (computation.IgnoreAllVisibility && D->hasCachedLinkage()) return LinkageInfo(D->getCachedLinkage(), DefaultVisibility, false); - if (llvm::Optional<LinkageInfo> LI = lookup(D, computation)) + if (std::optional<LinkageInfo> LI = lookup(D, computation)) return *LI; LinkageInfo LV = computeLVForDecl(D, computation); @@ -1494,7 +1596,7 @@ LinkageInfo LinkageComputer::getLVForDecl(const NamedDecl *D, // that all other computed linkages match, check that the one we just // computed also does. NamedDecl *Old = nullptr; - for (auto I : D->redecls()) { + for (auto *I : D->redecls()) { auto *T = cast<NamedDecl>(I); if (T == D) continue; @@ -1519,7 +1621,12 @@ LinkageInfo LinkageComputer::getDeclLinkageAndVisibility(const NamedDecl *D) { : CK); } -Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const { +Module *Decl::getOwningModuleForLinkage() const { + if (isa<NamespaceDecl>(this)) + // Namespaces never have module linkage. It is the entities within them + // that [may] do. + return nullptr; + Module *M = getOwningModule(); if (!M) return nullptr; @@ -1530,25 +1637,16 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const { return nullptr; case Module::ModuleInterfaceUnit: + case Module::ModuleImplementationUnit: + case Module::ModulePartitionInterface: + case Module::ModulePartitionImplementation: return M; - case Module::GlobalModuleFragment: { - // External linkage declarations in the global module have no owning module - // for linkage purposes. But internal linkage declarations in the global - // module fragment of a particular module are owned by that module for - // linkage purposes. - if (IgnoreLinkage) - return nullptr; - bool InternalLinkage; - if (auto *ND = dyn_cast<NamedDecl>(this)) - InternalLinkage = !ND->hasExternalFormalLinkage(); - else { - auto *NSD = dyn_cast<NamespaceDecl>(this); - InternalLinkage = (NSD && NSD->isAnonymousNamespace()) || - isInAnonymousNamespace(); - } - return InternalLinkage ? M->Parent : nullptr; - } + case Module::ModuleHeaderUnit: + case Module::ExplicitGlobalModuleFragment: + case Module::ImplicitGlobalModuleFragment: + // The global module shouldn't change the linkage. + return nullptr; case Module::PrivateModuleFragment: // The private module fragment is part of its containing module for linkage @@ -1559,15 +1657,19 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const { llvm_unreachable("unknown module kind"); } -void NamedDecl::printName(raw_ostream &os) const { - os << Name; +void NamedDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const { + Name.print(OS, Policy); +} + +void NamedDecl::printName(raw_ostream &OS) const { + printName(OS, getASTContext().getPrintingPolicy()); } std::string NamedDecl::getQualifiedNameAsString() const { std::string QualName; llvm::raw_string_ostream OS(QualName); printQualifiedName(OS, getASTContext().getPrintingPolicy()); - return OS.str(); + return QualName; } void NamedDecl::printQualifiedName(raw_ostream &OS) const { @@ -1578,7 +1680,7 @@ void NamedDecl::printQualifiedName(raw_ostream &OS, const PrintingPolicy &P) const { if (getDeclContext()->isFunctionOrMethod()) { // We do not print '(anonymous)' for function parameters without name. - printName(OS); + printName(OS, P); return; } printNestedNameSpecifier(OS, P); @@ -1589,7 +1691,7 @@ void NamedDecl::printQualifiedName(raw_ostream &OS, // fall back to "(anonymous)". 
SmallString<64> NameBuffer; llvm::raw_svector_ostream NameOS(NameBuffer); - printName(NameOS); + printName(NameOS, P); if (NameBuffer.empty()) OS << "(anonymous)"; else @@ -1647,8 +1749,7 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS, NameInScope = ND->getDeclName(); } - for (unsigned I = Contexts.size(); I != 0; --I) { - const DeclContext *DC = Contexts[I - 1]; + for (const DeclContext *DC : llvm::reverse(Contexts)) { if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC)) { OS << Spec->getName(); const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); @@ -1713,7 +1814,7 @@ void NamedDecl::getNameForDiagnostic(raw_ostream &OS, if (Qualified) printQualifiedName(OS, Policy); else - printName(OS); + printName(OS, Policy); } template<typename T> static bool isRedeclarableImpl(Redeclarable<T> *) { @@ -1731,7 +1832,8 @@ static bool isRedeclarable(Decl::Kind K) { llvm_unreachable("unknown decl kind"); } -bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const { +bool NamedDecl::declarationReplaces(const NamedDecl *OldD, + bool IsKnownNewer) const { assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch"); // Never replace one imported declaration with another; we need both results @@ -1761,13 +1863,13 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const { // Using declarations can be replaced if they import the same name from the // same context. - if (auto *UD = dyn_cast<UsingDecl>(this)) { + if (const auto *UD = dyn_cast<UsingDecl>(this)) { ASTContext &Context = getASTContext(); return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) == Context.getCanonicalNestedNameSpecifier( cast<UsingDecl>(OldD)->getQualifier()); } - if (auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) { + if (const auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) { ASTContext &Context = getASTContext(); return Context.getCanonicalNestedNameSpecifier(UUVD->getQualifier()) == Context.getCanonicalNestedNameSpecifier( @@ -1784,7 +1886,7 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const { // Check whether this is actually newer than OldD. We want to keep the // newer declaration. This loop will usually only iterate once, because // OldD is usually the previous declaration. 
- for (auto D : redecls()) { + for (const auto *D : redecls()) { if (D == OldD) break; @@ -1808,12 +1910,26 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const { } bool NamedDecl::hasLinkage() const { - return getFormalLinkage() != NoLinkage; + switch (getFormalLinkage()) { + case Linkage::Invalid: + llvm_unreachable("Linkage hasn't been computed!"); + case Linkage::None: + return false; + case Linkage::Internal: + return true; + case Linkage::UniqueExternal: + case Linkage::VisibleNone: + llvm_unreachable("Non-formal linkage is not allowed here!"); + case Linkage::Module: + case Linkage::External: + return true; + } + llvm_unreachable("Unhandled Linkage enum"); } NamedDecl *NamedDecl::getUnderlyingDeclImpl() { NamedDecl *ND = this; - while (auto *UD = dyn_cast<UsingShadowDecl>(ND)) + if (auto *UD = dyn_cast<UsingShadowDecl>(ND)) ND = UD->getTargetDecl(); if (auto *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND)) @@ -1835,7 +1951,7 @@ bool NamedDecl::isCXXInstanceMember() const { if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D)) return true; - if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction())) + if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(D->getAsFunction())) return MD->isInstance(); return false; } @@ -1999,7 +2115,7 @@ const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) { VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, + const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass SC) : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), redeclarable_base(C) { @@ -2014,14 +2130,13 @@ VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC, // Everything else is implicitly initialized to false. } -VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation StartL, SourceLocation IdL, - IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, - StorageClass S) { +VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartL, + SourceLocation IdL, const IdentifierInfo *Id, + QualType T, TypeSourceInfo *TInfo, StorageClass S) { return new (C, DC) VarDecl(Var, C, DC, StartL, IdL, Id, T, TInfo, S); } -VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +VarDecl *VarDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) VarDecl(Var, C, nullptr, SourceLocation(), SourceLocation(), nullptr, QualType(), nullptr, SC_None); @@ -2074,8 +2189,7 @@ static LanguageLinkage getDeclLanguageLinkage(const T &D) { // Language linkage is a C++ concept, but saying that everything else in C has // C language linkage fits the implementation nicely. - ASTContext &Context = D.getASTContext(); - if (!Context.getLangOpts().CPlusPlus) + if (!D.getASTContext().getLangOpts().CPlusPlus) return CLanguageLinkage; // C++ [dcl.link]p4: A C language linkage is ignored in determining the @@ -2216,20 +2330,24 @@ VarDecl *VarDecl::getActingDefinition() { return nullptr; VarDecl *LastTentative = nullptr; - VarDecl *First = getFirstDecl(); - for (auto I : First->redecls()) { - Kind = I->isThisDeclarationADefinition(); + + // Loop through the declaration chain, starting with the most recent. 
+ for (VarDecl *Decl = getMostRecentDecl(); Decl; + Decl = Decl->getPreviousDecl()) { + Kind = Decl->isThisDeclarationADefinition(); if (Kind == Definition) return nullptr; - if (Kind == TentativeDefinition) - LastTentative = I; + // Record the first (most recent) TentativeDefinition that is encountered. + if (Kind == TentativeDefinition && !LastTentative) + LastTentative = Decl; } + return LastTentative; } VarDecl *VarDecl::getDefinition(ASTContext &C) { VarDecl *First = getFirstDecl(); - for (auto I : First->redecls()) { + for (auto *I : First->redecls()) { if (I->isThisDeclarationADefinition(C) == Definition) return I; } @@ -2240,7 +2358,7 @@ VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const { DefinitionKind Kind = DeclarationOnly; const VarDecl *First = getFirstDecl(); - for (auto I : First->redecls()) { + for (auto *I : First->redecls()) { Kind = std::max(Kind, I->isThisDeclarationADefinition(C)); if (Kind == Definition) break; @@ -2250,7 +2368,7 @@ VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const { } const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const { - for (auto I : redecls()) { + for (auto *I : redecls()) { if (auto Expr = I->getInit()) { D = I; return Expr; @@ -2264,6 +2382,9 @@ bool VarDecl::hasInit() const { if (P->hasUnparsedDefaultArg() || P->hasUninstantiatedDefaultArg()) return false; + if (auto *Eval = getEvaluatedStmt()) + return Eval->Value.isValid(); + return !Init.isNull(); } @@ -2274,19 +2395,22 @@ Expr *VarDecl::getInit() { if (auto *S = Init.dyn_cast<Stmt *>()) return cast<Expr>(S); - return cast_or_null<Expr>(Init.get<EvaluatedStmt *>()->Value); + auto *Eval = getEvaluatedStmt(); + + return cast<Expr>(Eval->Value.get( + Eval->Value.isOffset() ? getASTContext().getExternalSource() : nullptr)); } Stmt **VarDecl::getInitAddress() { if (auto *ES = Init.dyn_cast<EvaluatedStmt *>()) - return &ES->Value; + return ES->Value.getAddressOfPointer(getASTContext().getExternalSource()); return Init.getAddrOfPtr1(); } VarDecl *VarDecl::getInitializingDeclaration() { VarDecl *Def = nullptr; - for (auto I : redecls()) { + for (auto *I : redecls()) { if (I->hasInit()) return I; @@ -2329,7 +2453,7 @@ bool VarDecl::mightBeUsableInConstantExpressions(const ASTContext &C) const { // OpenCL permits const integral variables to be used in constant // expressions, like in C++98. - if (!Lang.CPlusPlus && !Lang.OpenCL) + if (!Lang.CPlusPlus && !Lang.OpenCL && !Lang.C23) return false; // Function parameters are never usable in constant expressions. @@ -2351,14 +2475,19 @@ bool VarDecl::mightBeUsableInConstantExpressions(const ASTContext &C) const { if (!getType().isConstant(C) || getType().isVolatileQualified()) return false; - // In C++, const, non-volatile variables of integral or enumeration types - // can be used in constant expressions. - if (getType()->isIntegralOrEnumerationType()) + // In C++, but not in C, const, non-volatile variables of integral or + // enumeration types can be used in constant expressions. + if (getType()->isIntegralOrEnumerationType() && !Lang.C23) return true; + // C23 6.6p7: An identifier that is: + // ... + // - declared with storage-class specifier constexpr and has an object type, + // is a named constant, ... such a named constant is a constant expression + // with the type and value of the declared object. // Additionally, in C++11, non-volatile constexpr variables can be used in // constant expressions. 
- return Lang.CPlusPlus11 && isConstexpr(); + return (Lang.CPlusPlus11 || Lang.C23) && isConstexpr(); } bool VarDecl::isUsableInConstantExpressions(const ASTContext &Context) const { @@ -2374,7 +2503,8 @@ bool VarDecl::isUsableInConstantExpressions(const ASTContext &Context) const { if (!DefVD->mightBeUsableInConstantExpressions(Context)) return false; // ... and its initializer is a constant initializer. - if (Context.getLangOpts().CPlusPlus && !DefVD->hasConstantInitialization()) + if ((Context.getLangOpts().CPlusPlus || getLangOpts().C23) && + !DefVD->hasConstantInitialization()) return false; // C++98 [expr.const]p1: // An integral constant-expression can involve only [...] const variables @@ -2409,14 +2539,14 @@ EvaluatedStmt *VarDecl::getEvaluatedStmt() const { APValue *VarDecl::evaluateValue() const { SmallVector<PartialDiagnosticAt, 8> Notes; - return evaluateValue(Notes); + return evaluateValueImpl(Notes, hasConstantInitialization()); } -APValue *VarDecl::evaluateValue( - SmallVectorImpl<PartialDiagnosticAt> &Notes) const { +APValue *VarDecl::evaluateValueImpl(SmallVectorImpl<PartialDiagnosticAt> &Notes, + bool IsConstantInitialization) const { EvaluatedStmt *Eval = ensureEvaluatedStmt(); - const auto *Init = cast<Expr>(Eval->Value); + const auto *Init = getInit(); assert(!Init->isValueDependent()); // We only produce notes indicating why an initializer is non-constant the @@ -2432,8 +2562,19 @@ APValue *VarDecl::evaluateValue( Eval->IsEvaluating = true; - bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(), - this, Notes); + ASTContext &Ctx = getASTContext(); + bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, Ctx, this, Notes, + IsConstantInitialization); + + // In C++, or in C23 if we're initialising a 'constexpr' variable, this isn't + // a constant initializer if we produced notes. In that case, we can't keep + // the result, because it may only be correct under the assumption that the + // initializer is a constant context. + if (IsConstantInitialization && + (Ctx.getLangOpts().CPlusPlus || + (isConstexpr() && Ctx.getLangOpts().C23)) && + !Notes.empty()) + Result = false; // Ensure the computed APValue is cleaned up later if evaluation succeeded, // or that it's empty (so that there's nothing to clean up) if evaluation @@ -2441,7 +2582,7 @@ APValue *VarDecl::evaluateValue( if (!Result) Eval->Evaluated = APValue(); else if (Eval->Evaluated.needsCleanup()) - getASTContext().addDestruction(&Eval->Evaluated); + Ctx.addDestruction(&Eval->Evaluated); Eval->IsEvaluating = false; Eval->WasEvaluated = true; @@ -2470,8 +2611,11 @@ bool VarDecl::hasICEInitializer(const ASTContext &Context) const { } bool VarDecl::hasConstantInitialization() const { - // In C, all globals (and only globals) have constant initialization. - if (hasGlobalStorage() && !getASTContext().getLangOpts().CPlusPlus) + // In C, all globals and constexpr variables should have constant + // initialization. For constexpr variables in C check that initializer is a + // constant initializer because they can be used in constant expressions. + if (hasGlobalStorage() && !getASTContext().getLangOpts().CPlusPlus && + !isConstexpr()) return true; // In C++, it depends on whether the evaluation at the point of definition @@ -2490,12 +2634,21 @@ bool VarDecl::checkForConstantInitialization( // std::is_constant_evaluated()). 
assert(!Eval->WasEvaluated && "already evaluated var value before checking for constant init"); - assert(getASTContext().getLangOpts().CPlusPlus && "only meaningful in C++"); + assert((getASTContext().getLangOpts().CPlusPlus || + getASTContext().getLangOpts().C23) && + "only meaningful in C++/C23"); - assert(!cast<Expr>(Eval->Value)->isValueDependent()); + assert(!getInit()->isValueDependent()); // Evaluate the initializer to check whether it's a constant expression. - Eval->HasConstantInitialization = evaluateValue(Notes) && Notes.empty(); + Eval->HasConstantInitialization = + evaluateValueImpl(Notes, true) && Notes.empty(); + + // If evaluation as a constant initializer failed, allow re-evaluation as a + // non-constant initializer if we later find we want the value. + if (!Eval->HasConstantInitialization) + Eval->WasEvaluated = false; + return Eval->HasConstantInitialization; } @@ -2521,7 +2674,7 @@ bool VarDecl::isNonEscapingByref() const { bool VarDecl::hasDependentAlignment() const { QualType T = getType(); - return T->isDependentType() || T->isUndeducedAutoType() || + return T->isDependentType() || T->isUndeducedType() || llvm::any_of(specific_attrs<AlignedAttr>(), [](const AlignedAttr *AA) { return AA->isAlignmentDependent(); }); @@ -2667,6 +2820,42 @@ VarDecl::needsDestruction(const ASTContext &Ctx) const { return getType().isDestructedType(); } +bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const { + assert(hasInit() && "Expect initializer to check for flexible array init"); + auto *Ty = getType()->getAs<RecordType>(); + if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember()) + return false; + auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens()); + if (!List) + return false; + const Expr *FlexibleInit = List->getInit(List->getNumInits() - 1); + auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType()); + if (!InitTy) + return false; + return !InitTy->isZeroSize(); +} + +CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const { + assert(hasInit() && "Expect initializer to check for flexible array init"); + auto *Ty = getType()->getAs<RecordType>(); + if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember()) + return CharUnits::Zero(); + auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens()); + if (!List || List->getNumInits() == 0) + return CharUnits::Zero(); + const Expr *FlexibleInit = List->getInit(List->getNumInits() - 1); + auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType()); + if (!InitTy) + return CharUnits::Zero(); + CharUnits FlexibleArraySize = Ctx.getTypeSizeInChars(InitTy); + const ASTRecordLayout &RL = Ctx.getASTRecordLayout(Ty->getDecl()); + CharUnits FlexibleArrayOffset = + Ctx.toCharUnitsFromBits(RL.getFieldOffset(RL.getFieldCount() - 1)); + if (FlexibleArrayOffset + FlexibleArraySize < RL.getSize()) + return CharUnits::Zero(); + return FlexibleArrayOffset + FlexibleArraySize - RL.getSize(); +} + MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const { if (isStaticDataMember()) // FIXME: Remove ? 
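Illustration (not from the LLVM sources): the new VarDecl::hasFlexibleArrayInit() and VarDecl::getFlexibleArrayInitChars() helpers in the hunk above inspect the GNU flexible-array-member initialization extension. A minimal C sketch of the kind of declaration they look at; the struct and variable names here are hypothetical:

  struct Packet {
    int len;
    char payload[];          /* flexible array member */
  };

  /* The payload initializer {'h','i','\0'} occupies 3 bytes past the end of
     the record layout, so hasFlexibleArrayInit() is true and, on an ABI
     where sizeof(struct Packet) == 4, getFlexibleArrayInitChars() would
     report an excess of 3 chars (field offset 4 + init size 3 - record
     size 4). */
  static struct Packet greeting = {2, {'h', 'i', '\0'}};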
@@ -2716,10 +2905,10 @@ VarDecl::setInstantiationOfStaticDataMember(VarDecl *VD, //===----------------------------------------------------------------------===// ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, - QualType T, TypeSourceInfo *TInfo, - StorageClass S, Expr *DefArg) { + SourceLocation StartLoc, SourceLocation IdLoc, + const IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, StorageClass S, + Expr *DefArg) { return new (C, DC) ParmVarDecl(ParmVar, C, DC, StartLoc, IdLoc, Id, T, TInfo, S, DefArg); } @@ -2732,7 +2921,7 @@ QualType ParmVarDecl::getOriginalType() const { return T; } -ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) ParmVarDecl(ParmVar, C, nullptr, SourceLocation(), SourceLocation(), nullptr, QualType(), nullptr, SC_None, nullptr); @@ -2754,11 +2943,15 @@ SourceRange ParmVarDecl::getSourceRange() const { } bool ParmVarDecl::isDestroyedInCallee() const { + // ns_consumed only affects code generation in ARC if (hasAttr<NSConsumedAttr>()) - return true; + return getASTContext().getLangOpts().ObjCAutoRefCount; - auto *RT = getType()->getAs<RecordType>(); - if (RT && RT->getDecl()->isParamDestroyedInCallee()) + // FIXME: isParamDestroyedInCallee() should probably imply + // isDestructedType() + const auto *RT = getType()->getAs<RecordType>(); + if (RT && RT->getDecl()->isParamDestroyedInCallee() && + getType().isDestructedType()) return true; return false; @@ -2770,7 +2963,7 @@ Expr *ParmVarDecl::getDefaultArg() { "Default argument is not yet instantiated!"); Expr *Arg = getInit(); - if (auto *E = dyn_cast_or_null<FullExpr>(Arg)) + if (auto *E = dyn_cast_if_present<FullExpr>(Arg)) return E->getSubExpr(); return Arg; @@ -2809,7 +3002,7 @@ void ParmVarDecl::setUninstantiatedDefaultArg(Expr *arg) { Expr *ParmVarDecl::getUninstantiatedDefaultArg() { assert(hasUninstantiatedDefaultArg() && "Wrong kind of initialization expression!"); - return cast_or_null<Expr>(Init.get<Stmt *>()); + return cast_if_present<Expr>(Init.get<Stmt *>()); } bool ParmVarDecl::hasDefaultArg() const { @@ -2837,7 +3030,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc, const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo, StorageClass S, - bool isInlineSpecified, + bool UsesFPIntrin, bool isInlineSpecified, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause) : DeclaratorDecl(DK, DC, NameInfo.getLoc(), NameInfo.getName(), T, TInfo, @@ -2849,7 +3042,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC, FunctionDeclBits.IsInline = isInlineSpecified; FunctionDeclBits.IsInlineSpecified = isInlineSpecified; FunctionDeclBits.IsVirtualAsWritten = false; - FunctionDeclBits.IsPure = false; + FunctionDeclBits.IsPureVirtual = false; FunctionDeclBits.HasInheritedPrototype = false; FunctionDeclBits.HasWrittenPrototype = true; FunctionDeclBits.IsDeleted = false; @@ -2857,18 +3050,22 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC, FunctionDeclBits.IsTrivialForCall = false; FunctionDeclBits.IsDefaulted = false; FunctionDeclBits.IsExplicitlyDefaulted = false; - FunctionDeclBits.HasDefaultedFunctionInfo = false; + FunctionDeclBits.HasDefaultedOrDeletedInfo = false; + FunctionDeclBits.IsIneligibleOrNotSelected = false; FunctionDeclBits.HasImplicitReturnZero = false; 
FunctionDeclBits.IsLateTemplateParsed = false; FunctionDeclBits.ConstexprKind = static_cast<uint64_t>(ConstexprKind); + FunctionDeclBits.BodyContainsImmediateEscalatingExpression = false; FunctionDeclBits.InstantiationIsPending = false; FunctionDeclBits.UsesSEHTry = false; - FunctionDeclBits.UsesFPIntrin = false; + FunctionDeclBits.UsesFPIntrin = UsesFPIntrin; FunctionDeclBits.HasSkippedBody = false; FunctionDeclBits.WillHaveBody = false; FunctionDeclBits.IsMultiVersion = false; - FunctionDeclBits.IsCopyDeductionCandidate = false; + FunctionDeclBits.DeductionCandidateKind = + static_cast<unsigned char>(DeductionCandidate::Normal); FunctionDeclBits.HasODRHash = false; + FunctionDeclBits.FriendConstraintRefersToEnclosingTemplate = false; if (TrailingRequiresClause) setTrailingRequiresClause(TrailingRequiresClause); } @@ -2887,34 +3084,69 @@ bool FunctionDecl::isVariadic() const { return false; } -FunctionDecl::DefaultedFunctionInfo * -FunctionDecl::DefaultedFunctionInfo::Create(ASTContext &Context, - ArrayRef<DeclAccessPair> Lookups) { - DefaultedFunctionInfo *Info = new (Context.Allocate( - totalSizeToAlloc<DeclAccessPair>(Lookups.size()), - std::max(alignof(DefaultedFunctionInfo), alignof(DeclAccessPair)))) - DefaultedFunctionInfo; +FunctionDecl::DefaultedOrDeletedFunctionInfo * +FunctionDecl::DefaultedOrDeletedFunctionInfo::Create( + ASTContext &Context, ArrayRef<DeclAccessPair> Lookups, + StringLiteral *DeletedMessage) { + static constexpr size_t Alignment = + std::max({alignof(DefaultedOrDeletedFunctionInfo), + alignof(DeclAccessPair), alignof(StringLiteral *)}); + size_t Size = totalSizeToAlloc<DeclAccessPair, StringLiteral *>( + Lookups.size(), DeletedMessage != nullptr); + + DefaultedOrDeletedFunctionInfo *Info = + new (Context.Allocate(Size, Alignment)) DefaultedOrDeletedFunctionInfo; Info->NumLookups = Lookups.size(); + Info->HasDeletedMessage = DeletedMessage != nullptr; + std::uninitialized_copy(Lookups.begin(), Lookups.end(), Info->getTrailingObjects<DeclAccessPair>()); + if (DeletedMessage) + *Info->getTrailingObjects<StringLiteral *>() = DeletedMessage; return Info; } -void FunctionDecl::setDefaultedFunctionInfo(DefaultedFunctionInfo *Info) { - assert(!FunctionDeclBits.HasDefaultedFunctionInfo && "already have this"); +void FunctionDecl::setDefaultedOrDeletedInfo( + DefaultedOrDeletedFunctionInfo *Info) { + assert(!FunctionDeclBits.HasDefaultedOrDeletedInfo && "already have this"); assert(!Body && "can't replace function body with defaulted function info"); - FunctionDeclBits.HasDefaultedFunctionInfo = true; - DefaultedInfo = Info; + FunctionDeclBits.HasDefaultedOrDeletedInfo = true; + DefaultedOrDeletedInfo = Info; } -FunctionDecl::DefaultedFunctionInfo * -FunctionDecl::getDefaultedFunctionInfo() const { - return FunctionDeclBits.HasDefaultedFunctionInfo ? 
DefaultedInfo : nullptr; +void FunctionDecl::setDeletedAsWritten(bool D, StringLiteral *Message) { + FunctionDeclBits.IsDeleted = D; + + if (Message) { + assert(isDeletedAsWritten() && "Function must be deleted"); + if (FunctionDeclBits.HasDefaultedOrDeletedInfo) + DefaultedOrDeletedInfo->setDeletedMessage(Message); + else + setDefaultedOrDeletedInfo(DefaultedOrDeletedFunctionInfo::Create( + getASTContext(), /*Lookups=*/{}, Message)); + } +} + +void FunctionDecl::DefaultedOrDeletedFunctionInfo::setDeletedMessage( + StringLiteral *Message) { + // We should never get here with the DefaultedOrDeletedInfo populated, but + // no space allocated for the deleted message, since that would require + // recreating this, but setDefaultedOrDeletedInfo() disallows overwriting + // an already existing DefaultedOrDeletedFunctionInfo. + assert(HasDeletedMessage && + "No space to store a delete message in this DefaultedOrDeletedInfo"); + *getTrailingObjects<StringLiteral *>() = Message; +} + +FunctionDecl::DefaultedOrDeletedFunctionInfo * +FunctionDecl::getDefalutedOrDeletedInfo() const { + return FunctionDeclBits.HasDefaultedOrDeletedInfo ? DefaultedOrDeletedInfo + : nullptr; } bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const { - for (auto I : redecls()) { + for (const auto *I : redecls()) { if (I->doesThisDeclarationHaveABody()) { Definition = I; return true; @@ -2925,7 +3157,7 @@ bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const { } bool FunctionDecl::hasTrivialBody() const { - Stmt *S = getBody(); + const Stmt *S = getBody(); if (!S) { // Since we don't have a body for this function, we don't know if it's // trivial or not. @@ -2997,7 +3229,7 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const { if (!hasBody(Definition)) return nullptr; - assert(!Definition->FunctionDeclBits.HasDefaultedFunctionInfo && + assert(!Definition->FunctionDeclBits.HasDefaultedOrDeletedInfo && "definition should not have a body"); if (Definition->Body) return Definition->Body.get(getASTContext().getExternalSource()); @@ -3006,14 +3238,14 @@ Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const { } void FunctionDecl::setBody(Stmt *B) { - FunctionDeclBits.HasDefaultedFunctionInfo = false; + FunctionDeclBits.HasDefaultedOrDeletedInfo = false; Body = LazyDeclStmtPtr(B); if (B) EndRangeLoc = B->getEndLoc(); } -void FunctionDecl::setPure(bool P) { - FunctionDeclBits.IsPure = P; +void FunctionDecl::setIsPureVirtual(bool P) { + FunctionDeclBits.IsPureVirtual = P; if (P) if (auto *Parent = dyn_cast<CXXRecordDecl>(getDeclContext())) Parent->markedVirtualFunctionPure(); @@ -3021,10 +3253,48 @@ void FunctionDecl::setPure(bool P) { template<std::size_t Len> static bool isNamed(const NamedDecl *ND, const char (&Str)[Len]) { - IdentifierInfo *II = ND->getIdentifier(); + const IdentifierInfo *II = ND->getIdentifier(); return II && II->isStr(Str); } +bool FunctionDecl::isImmediateEscalating() const { + // C++23 [expr.const]/p17 + // An immediate-escalating function is + // - the call operator of a lambda that is not declared with the consteval + // specifier, + if (isLambdaCallOperator(this) && !isConsteval()) + return true; + // - a defaulted special member function that is not declared with the + // consteval specifier, + if (isDefaulted() && !isConsteval()) + return true; + // - a function that results from the instantiation of a templated entity + // defined with the constexpr specifier. 
+ TemplatedKind TK = getTemplatedKind(); + if (TK != TK_NonTemplate && TK != TK_DependentNonTemplate && + isConstexprSpecified()) + return true; + return false; +} + +bool FunctionDecl::isImmediateFunction() const { + // C++23 [expr.const]/p18 + // An immediate function is a function or constructor that is + // - declared with the consteval specifier + if (isConsteval()) + return true; + // - an immediate-escalating function F whose function body contains an + // immediate-escalating expression + if (isImmediateEscalating() && BodyContainsImmediateEscalatingExpressions()) + return true; + + if (const auto *MD = dyn_cast<CXXMethodDecl>(this); + MD && MD->isLambdaStaticInvoker()) + return MD->getParent()->getLambdaCallOperator()->isImmediateFunction(); + + return false; +} + bool FunctionDecl::isMain() const { const TranslationUnitDecl *tunit = dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext()); @@ -3061,11 +3331,13 @@ bool FunctionDecl::isMSVCRTEntryPoint() const { } bool FunctionDecl::isReservedGlobalPlacementOperator() const { - assert(getDeclName().getNameKind() == DeclarationName::CXXOperatorName); - assert(getDeclName().getCXXOverloadedOperator() == OO_New || - getDeclName().getCXXOverloadedOperator() == OO_Delete || - getDeclName().getCXXOverloadedOperator() == OO_Array_New || - getDeclName().getCXXOverloadedOperator() == OO_Array_Delete); + if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName) + return false; + if (getDeclName().getCXXOverloadedOperator() != OO_New && + getDeclName().getCXXOverloadedOperator() != OO_Delete && + getDeclName().getCXXOverloadedOperator() != OO_Array_New && + getDeclName().getCXXOverloadedOperator() != OO_Array_Delete) + return false; if (!getDeclContext()->getRedeclContext()->isTranslationUnit()) return false; @@ -3074,9 +3346,9 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const { if (proto->getNumParams() != 2 || proto->isVariadic()) return false; - ASTContext &Context = - cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext()) - ->getASTContext(); + const ASTContext &Context = + cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext()) + ->getASTContext(); // The result type and first argument type are constant across all // these operators. The second argument must be exactly void*. 
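Illustration (not from the LLVM sources): the reserved global placement allocation and deallocation functions that isReservedGlobalPlacementOperator() is meant to recognize are the forms declared in <new>, sketched below; the change above only replaces the former asserts with early 'return false' for declarations that are not one of these operators.

  #include <cstddef>

  // Reserved global placement forms ([new.delete.placement]): exactly two
  // parameters, with the second parameter exactly 'void*'.
  void *operator new(std::size_t, void *) noexcept;
  void *operator new[](std::size_t, void *) noexcept;
  void operator delete(void *, void *) noexcept;
  void operator delete[](void *, void *) noexcept;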
@@ -3084,7 +3356,7 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const { } bool FunctionDecl::isReplaceableGlobalAllocationFunction( - Optional<unsigned> *AlignmentParam, bool *IsNothrow) const { + std::optional<unsigned> *AlignmentParam, bool *IsNothrow) const { if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName) return false; if (getDeclName().getCXXOverloadedOperator() != OO_New && @@ -3101,7 +3373,7 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction( return false; const auto *FPT = getType()->castAs<FunctionProtoType>(); - if (FPT->getNumParams() == 0 || FPT->getNumParams() > 3 || FPT->isVariadic()) + if (FPT->getNumParams() == 0 || FPT->getNumParams() > 4 || FPT->isVariadic()) return false; // If this is a single-parameter function, it must be a replaceable global @@ -3111,7 +3383,7 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction( unsigned Params = 1; QualType Ty = FPT->getParamType(Params); - ASTContext &Ctx = getASTContext(); + const ASTContext &Ctx = getASTContext(); auto Consume = [&] { ++Params; @@ -3136,8 +3408,8 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction( *AlignmentParam = Params; } - // Finally, if this is not a sized delete, the final parameter can - // be a 'const std::nothrow_t&'. + // If this is not a sized delete, the next parameter can be a + // 'const std::nothrow_t&'. if (!IsSizedDelete && !Ty.isNull() && Ty->isReferenceType()) { Ty = Ty->getPointeeType(); if (Ty.getCVRQualifiers() != Qualifiers::Const) @@ -3149,6 +3421,20 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction( } } + // Finally, recognize the not yet standard versions of new that take a + // hot/cold allocation hint (__hot_cold_t). These are currently supported by + // tcmalloc (see + // https://github.com/google/tcmalloc/blob/220043886d4e2efff7a5702d5172cb8065253664/tcmalloc/malloc_extension.h#L53). + if (!IsSizedDelete && !Ty.isNull() && Ty->isEnumeralType()) { + QualType T = Ty; + while (const auto *TD = T->getAs<TypedefType>()) + T = TD->getDecl()->getUnderlyingType(); + const IdentifierInfo *II = + T->castAs<EnumType>()->getDecl()->getIdentifier(); + if (II && II->isStr("__hot_cold_t")) + Consume(); + } + return Params == FPT->getNumParams(); } @@ -3157,7 +3443,24 @@ bool FunctionDecl::isInlineBuiltinDeclaration() const { return false; const FunctionDecl *Definition; - return hasBody(Definition) && Definition->isInlineSpecified(); + if (!hasBody(Definition)) + return false; + + if (!Definition->isInlineSpecified() || + !Definition->hasAttr<AlwaysInlineAttr>()) + return false; + + ASTContext &Context = getASTContext(); + switch (Context.GetGVALinkageForFunction(Definition)) { + case GVA_Internal: + case GVA_DiscardableODR: + case GVA_StrongODR: + return false; + case GVA_AvailableExternally: + case GVA_StrongExternal: + return true; + } + llvm_unreachable("Unknown GVALinkage"); } bool FunctionDecl::isDestroyingOperatorDelete() const { @@ -3205,7 +3508,6 @@ bool FunctionDecl::isGlobal() const { if (const auto *Namespace = cast<NamespaceDecl>(DC)) { if (!Namespace->getDeclName()) return false; - break; } } @@ -3223,14 +3525,39 @@ bool FunctionDecl::isNoReturn() const { return false; } +bool FunctionDecl::isMemberLikeConstrainedFriend() const { + // C++20 [temp.friend]p9: + // A non-template friend declaration with a requires-clause [or] + // a friend function template with a constraint that depends on a template + // parameter from an enclosing template [...] 
does not declare the same + // function or function template as a declaration in any other scope. + + // If this isn't a friend then it's not a member-like constrained friend. + if (!getFriendObjectKind()) { + return false; + } + + if (!getDescribedFunctionTemplate()) { + // If these friends don't have constraints, they aren't constrained, and + // thus don't fall under temp.friend p9. Else the simple presence of a + // constraint makes them unique. + return getTrailingRequiresClause(); + } + + return FriendConstraintRefersToEnclosingTemplate(); +} MultiVersionKind FunctionDecl::getMultiVersionKind() const { if (hasAttr<TargetAttr>()) return MultiVersionKind::Target; + if (hasAttr<TargetVersionAttr>()) + return MultiVersionKind::TargetVersion; if (hasAttr<CPUDispatchAttr>()) return MultiVersionKind::CPUDispatch; if (hasAttr<CPUSpecificAttr>()) return MultiVersionKind::CPUSpecific; + if (hasAttr<TargetClonesAttr>()) + return MultiVersionKind::TargetClones; return MultiVersionKind::None; } @@ -3243,7 +3570,25 @@ bool FunctionDecl::isCPUSpecificMultiVersion() const { } bool FunctionDecl::isTargetMultiVersion() const { - return isMultiVersion() && hasAttr<TargetAttr>(); + return isMultiVersion() && + (hasAttr<TargetAttr>() || hasAttr<TargetVersionAttr>()); +} + +bool FunctionDecl::isTargetMultiVersionDefault() const { + if (!isMultiVersion()) + return false; + if (hasAttr<TargetAttr>()) + return getAttr<TargetAttr>()->isDefaultVersion(); + return hasAttr<TargetVersionAttr>() && + getAttr<TargetVersionAttr>()->isDefaultVersion(); +} + +bool FunctionDecl::isTargetClonesMultiVersion() const { + return isMultiVersion() && hasAttr<TargetClonesAttr>(); +} + +bool FunctionDecl::isTargetVersionMultiVersion() const { + return isMultiVersion() && hasAttr<TargetVersionAttr>(); } void @@ -3296,7 +3641,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const { (!hasAttr<ArmBuiltinAliasAttr>() && !hasAttr<BuiltinAliasAttr>())) return 0; - ASTContext &Context = getASTContext(); + const ASTContext &Context = getASTContext(); if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) return BuiltinID; @@ -3325,7 +3670,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const { // library, none of the predefined library functions except printf and malloc // should be treated as a builtin i.e. 0 should be returned for them. 
if (Context.getTargetInfo().getTriple().isAMDGCN() && - Context.getLangOpts().OpenMPIsDevice && + Context.getLangOpts().OpenMPIsTargetDevice && Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) && !(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc)) return 0; @@ -3375,11 +3720,25 @@ unsigned FunctionDecl::getMinRequiredArguments() const { return NumRequiredArgs; } +bool FunctionDecl::hasCXXExplicitFunctionObjectParameter() const { + return getNumParams() != 0 && getParamDecl(0)->isExplicitObjectParameter(); +} + +unsigned FunctionDecl::getNumNonObjectParams() const { + return getNumParams() - + static_cast<unsigned>(hasCXXExplicitFunctionObjectParameter()); +} + +unsigned FunctionDecl::getMinRequiredExplicitArguments() const { + return getMinRequiredArguments() - + static_cast<unsigned>(hasCXXExplicitFunctionObjectParameter()); +} + bool FunctionDecl::hasOneParamOrDefaultArgs() const { return getNumParams() == 1 || (getNumParams() > 1 && - std::all_of(param_begin() + 1, param_end(), - [](ParmVarDecl *P) { return P->hasDefaultArg(); })); + llvm::all_of(llvm::drop_begin(parameters()), + [](ParmVarDecl *P) { return P->hasDefaultArg(); })); } /// The combination of the extern and inline keywords under MSVC forces @@ -3441,7 +3800,7 @@ bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const { assert(!doesThisDeclarationHaveABody() && "Must have a declaration without a body."); - ASTContext &Context = getASTContext(); + const ASTContext &Context = getASTContext(); if (Context.getLangOpts().MSVCCompat) { const FunctionDecl *Definition; @@ -3578,7 +3937,7 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const { // If any declaration is 'inline' but not 'extern', then this definition // is externally visible. - for (auto Redecl : redecls()) { + for (auto *Redecl : redecls()) { if (Redecl->isInlineSpecified() && Redecl->getStorageClass() != SC_Extern) return true; @@ -3595,7 +3954,7 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const { // [...] If all of the file scope declarations for a function in a // translation unit include the inline function specifier without extern, // then the definition in that translation unit is an inline definition. 
- for (auto Redecl : redecls()) { + for (auto *Redecl : redecls()) { if (RedeclForcesDefC99(Redecl)) return true; } @@ -3626,8 +3985,13 @@ const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const { FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const { if (TemplateOrSpecialization.isNull()) return TK_NonTemplate; - if (TemplateOrSpecialization.is<FunctionTemplateDecl *>()) + if (const auto *ND = TemplateOrSpecialization.dyn_cast<NamedDecl *>()) { + if (isa<FunctionDecl>(ND)) + return TK_DependentNonTemplate; + assert(isa<FunctionTemplateDecl>(ND) && + "No other valid types in NamedDecl"); return TK_FunctionTemplate; + } if (TemplateOrSpecialization.is<MemberSpecializationInfo *>()) return TK_MemberSpecialization; if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>()) @@ -3668,15 +4032,34 @@ FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C, } FunctionTemplateDecl *FunctionDecl::getDescribedFunctionTemplate() const { - return TemplateOrSpecialization.dyn_cast<FunctionTemplateDecl *>(); + return dyn_cast_if_present<FunctionTemplateDecl>( + TemplateOrSpecialization.dyn_cast<NamedDecl *>()); } -void FunctionDecl::setDescribedFunctionTemplate(FunctionTemplateDecl *Template) { +void FunctionDecl::setDescribedFunctionTemplate( + FunctionTemplateDecl *Template) { assert(TemplateOrSpecialization.isNull() && "Member function is already a specialization"); TemplateOrSpecialization = Template; } +bool FunctionDecl::isFunctionTemplateSpecialization() const { + return TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>() || + TemplateOrSpecialization + .is<DependentFunctionTemplateSpecializationInfo *>(); +} + +void FunctionDecl::setInstantiatedFromDecl(FunctionDecl *FD) { + assert(TemplateOrSpecialization.isNull() && + "Function is already a specialization"); + TemplateOrSpecialization = FD; +} + +FunctionDecl *FunctionDecl::getInstantiatedFromDecl() const { + return dyn_cast_if_present<FunctionDecl>( + TemplateOrSpecialization.dyn_cast<NamedDecl *>()); +} + bool FunctionDecl::isImplicitlyInstantiable() const { // If the function is invalid, it can't be implicitly instantiated. 
if (isInvalidDecl()) @@ -3800,23 +4183,27 @@ FunctionDecl::getTemplateSpecializationArgsAsWritten() const { .dyn_cast<FunctionTemplateSpecializationInfo*>()) { return Info->TemplateArgumentsAsWritten; } + if (DependentFunctionTemplateSpecializationInfo *Info = + TemplateOrSpecialization + .dyn_cast<DependentFunctionTemplateSpecializationInfo *>()) { + return Info->TemplateArgumentsAsWritten; + } return nullptr; } -void -FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C, - FunctionTemplateDecl *Template, - const TemplateArgumentList *TemplateArgs, - void *InsertPos, - TemplateSpecializationKind TSK, - const TemplateArgumentListInfo *TemplateArgsAsWritten, - SourceLocation PointOfInstantiation) { +void FunctionDecl::setFunctionTemplateSpecialization( + ASTContext &C, FunctionTemplateDecl *Template, + TemplateArgumentList *TemplateArgs, void *InsertPos, + TemplateSpecializationKind TSK, + const TemplateArgumentListInfo *TemplateArgsAsWritten, + SourceLocation PointOfInstantiation) { assert((TemplateOrSpecialization.isNull() || TemplateOrSpecialization.is<MemberSpecializationInfo *>()) && "Member function is already a specialization"); assert(TSK != TSK_Undeclared && "Must specify the type of function template specialization"); assert((TemplateOrSpecialization.isNull() || + getFriendObjectKind() != FOK_None || TSK == TSK_ExplicitSpecialization) && "Member specialization must be an explicit specialization"); FunctionTemplateSpecializationInfo *Info = @@ -3828,10 +4215,9 @@ FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C, Template->addSpecialization(Info, InsertPos); } -void -FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context, - const UnresolvedSetImpl &Templates, - const TemplateArgumentListInfo &TemplateArgs) { +void FunctionDecl::setDependentTemplateSpecialization( + ASTContext &Context, const UnresolvedSetImpl &Templates, + const TemplateArgumentListInfo *TemplateArgs) { assert(TemplateOrSpecialization.isNull()); DependentFunctionTemplateSpecializationInfo *Info = DependentFunctionTemplateSpecializationInfo::Create(Context, Templates, @@ -3847,28 +4233,26 @@ FunctionDecl::getDependentSpecializationInfo() const { DependentFunctionTemplateSpecializationInfo * DependentFunctionTemplateSpecializationInfo::Create( - ASTContext &Context, const UnresolvedSetImpl &Ts, - const TemplateArgumentListInfo &TArgs) { - void *Buffer = Context.Allocate( - totalSizeToAlloc<TemplateArgumentLoc, FunctionTemplateDecl *>( - TArgs.size(), Ts.size())); - return new (Buffer) DependentFunctionTemplateSpecializationInfo(Ts, TArgs); + ASTContext &Context, const UnresolvedSetImpl &Candidates, + const TemplateArgumentListInfo *TArgs) { + const auto *TArgsWritten = + TArgs ? 
ASTTemplateArgumentListInfo::Create(Context, *TArgs) : nullptr; + return new (Context.Allocate( + totalSizeToAlloc<FunctionTemplateDecl *>(Candidates.size()))) + DependentFunctionTemplateSpecializationInfo(Candidates, TArgsWritten); } DependentFunctionTemplateSpecializationInfo:: -DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts, - const TemplateArgumentListInfo &TArgs) - : AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) { - NumTemplates = Ts.size(); - NumArgs = TArgs.size(); - - FunctionTemplateDecl **TsArray = getTrailingObjects<FunctionTemplateDecl *>(); - for (unsigned I = 0, E = Ts.size(); I != E; ++I) - TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl()); - - TemplateArgumentLoc *ArgsArray = getTrailingObjects<TemplateArgumentLoc>(); - for (unsigned I = 0, E = TArgs.size(); I != E; ++I) - new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]); + DependentFunctionTemplateSpecializationInfo( + const UnresolvedSetImpl &Candidates, + const ASTTemplateArgumentListInfo *TemplateArgsWritten) + : NumCandidates(Candidates.size()), + TemplateArgumentsAsWritten(TemplateArgsWritten) { + std::transform(Candidates.begin(), Candidates.end(), + getTrailingObjects<FunctionTemplateDecl *>(), + [](NamedDecl *ND) { + return cast<FunctionTemplateDecl>(ND->getUnderlyingDecl()); + }); } TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const { @@ -3883,6 +4267,13 @@ TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const { TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>()) return MSInfo->getTemplateSpecializationKind(); + // A dependent function template specialization is an explicit specialization, + // except when it's a friend declaration. + if (TemplateOrSpecialization + .is<DependentFunctionTemplateSpecializationInfo *>() && + getFriendObjectKind() == FOK_None) + return TSK_ExplicitSpecialization; + return TSK_Undeclared; } @@ -3897,6 +4288,11 @@ FunctionDecl::getTemplateSpecializationKindForInstantiation() const { // template<> void f<int>() {} // }; // + // Within the templated CXXRecordDecl, A<T>::f<int> is a dependent function + // template specialization; both getTemplateSpecializationKind() and + // getTemplateSpecializationKindForInstantiation() will return + // TSK_ExplicitSpecialization. 
+ // // For A<int>::f<int>(): // * getTemplateSpecializationKind() will return TSK_ExplicitSpecialization // * getTemplateSpecializationKindForInstantiation() will return @@ -3917,6 +4313,11 @@ FunctionDecl::getTemplateSpecializationKindForInstantiation() const { TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>()) return MSInfo->getTemplateSpecializationKind(); + if (TemplateOrSpecialization + .is<DependentFunctionTemplateSpecializationInfo *>() && + getFriendObjectKind() == FOK_None) + return TSK_ExplicitSpecialization; + return TSK_Undeclared; } @@ -4061,6 +4462,10 @@ unsigned FunctionDecl::getMemoryFunctionKind() const { case Builtin::BIbzero: return Builtin::BIbzero; + case Builtin::BI__builtin_bcopy: + case Builtin::BIbcopy: + return Builtin::BIbcopy; + case Builtin::BIfree: return Builtin::BIfree; @@ -4092,6 +4497,8 @@ unsigned FunctionDecl::getMemoryFunctionKind() const { return Builtin::BIstrlen; if (FnInfo->isStr("bzero")) return Builtin::BIbzero; + if (FnInfo->isStr("bcopy")) + return Builtin::BIbcopy; } else if (isInStdNamespace()) { if (FnInfo->isStr("free")) return Builtin::BIfree; @@ -4129,14 +4536,14 @@ unsigned FunctionDecl::getODRHash() { FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, QualType T, + const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable, InClassInitStyle InitStyle) { return new (C, DC) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo, BW, Mutable, InitStyle); } -FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) FieldDecl(Field, nullptr, SourceLocation(), SourceLocation(), nullptr, QualType(), nullptr, nullptr, false, ICIS_NoInit); @@ -4152,13 +4559,35 @@ bool FieldDecl::isAnonymousStructOrUnion() const { return false; } +Expr *FieldDecl::getInClassInitializer() const { + if (!hasInClassInitializer()) + return nullptr; + + LazyDeclStmtPtr InitPtr = BitField ? InitAndBitWidth->Init : Init; + return cast_if_present<Expr>( + InitPtr.isOffset() ? InitPtr.get(getASTContext().getExternalSource()) + : InitPtr.get(nullptr)); +} + +void FieldDecl::setInClassInitializer(Expr *NewInit) { + setLazyInClassInitializer(LazyDeclStmtPtr(NewInit)); +} + +void FieldDecl::setLazyInClassInitializer(LazyDeclStmtPtr NewInit) { + assert(hasInClassInitializer() && !getInClassInitializer()); + if (BitField) + InitAndBitWidth->Init = NewInit; + else + Init = NewInit; +} + unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const { assert(isBitField() && "not a bitfield"); return getBitWidth()->EvaluateKnownConstInt(Ctx).getZExtValue(); } bool FieldDecl::isZeroLengthBitField(const ASTContext &Ctx) const { - return isUnnamedBitfield() && !getBitWidth()->isValueDependent() && + return isUnnamedBitField() && !getBitWidth()->isValueDependent() && getBitWidthValue(Ctx) == 0; } @@ -4190,9 +4619,18 @@ bool FieldDecl::isZeroSize(const ASTContext &Ctx) const { // Otherwise, [...] the circumstances under which the object has zero size // are implementation-defined. - // FIXME: This might be Itanium ABI specific; we don't yet know what the MS - // ABI will do. 
- return true; + if (!Ctx.getTargetInfo().getCXXABI().isMicrosoft()) + return true; + + // MS ABI: has nonzero size if it is a class type with class type fields, + // whether or not they have nonzero size + return !llvm::any_of(CXXRD->fields(), [](const FieldDecl *Field) { + return Field->getType()->getAs<RecordType>(); + }); +} + +bool FieldDecl::isPotentiallyOverlapping() const { + return hasAttr<NoUniqueAddressAttr>() && getType()->getAsCXXRecordDecl(); } unsigned FieldDecl::getFieldIndex() const { @@ -4208,6 +4646,8 @@ unsigned FieldDecl::getFieldIndex() const { for (auto *Field : RD->fields()) { Field->getCanonicalDecl()->CachedFieldIndex = Index + 1; + assert(Field->getCanonicalDecl()->CachedFieldIndex == Index + 1 && + "overflow in field numbering"); ++Index; } @@ -4227,11 +4667,21 @@ SourceRange FieldDecl::getSourceRange() const { void FieldDecl::setCapturedVLAType(const VariableArrayType *VLAType) { assert((getParent()->isLambda() || getParent()->isCapturedRecord()) && "capturing type in non-lambda or captured record."); - assert(InitStorage.getInt() == ISK_NoInit && - InitStorage.getPointer() == nullptr && - "bit width, initializer or captured type already set"); - InitStorage.setPointerAndInt(const_cast<VariableArrayType *>(VLAType), - ISK_CapturedVLAType); + assert(StorageKind == ISK_NoInit && !BitField && + "bit-field or field with default member initializer cannot capture " + "VLA type"); + StorageKind = ISK_CapturedVLAType; + CapturedVLAType = VLAType; +} + +void FieldDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const { + // Print unnamed members using name of their type. + if (isAnonymousStructOrUnion()) { + this->getType().print(OS, Policy); + return; + } + // Otherwise, do the normal printing. + DeclaratorDecl::printName(OS, Policy); } //===----------------------------------------------------------------------===// @@ -4243,8 +4693,8 @@ TagDecl::TagDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC, SourceLocation StartL) : TypeDecl(DK, DC, L, Id, StartL), DeclContext(DK), redeclarable_base(C), TypedefNameDeclOrQualifier((TypedefNameDecl *)nullptr) { - assert((DK != Enum || TK == TTK_Enum) && - "EnumDecl not matched with TTK_Enum"); + assert((DK != Enum || TK == TagTypeKind::Enum) && + "EnumDecl not matched with TagTypeKind::Enum"); setPreviousDecl(PrevDecl); setTagKind(TK); setCompleteDefinition(false); @@ -4252,6 +4702,7 @@ TagDecl::TagDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC, setEmbeddedInDeclarator(false); setFreeStanding(false); setCompleteDefinitionRequired(false); + TagDeclBits.IsThisDeclarationADemotedDefinition = false; } SourceLocation TagDecl::getOuterLocStart() const { @@ -4281,7 +4732,7 @@ void TagDecl::startDefinition() { if (auto *D = dyn_cast<CXXRecordDecl>(this)) { struct CXXRecordDecl::DefinitionData *Data = new (getASTContext()) struct CXXRecordDecl::DefinitionData(D); - for (auto I : redecls()) + for (auto *I : redecls()) cast<CXXRecordDecl>(I)->DefinitionData = Data; } } @@ -4314,7 +4765,7 @@ TagDecl *TagDecl::getDefinition() const { if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(this)) return CXXRD->getDefinition(); - for (auto R : redecls()) + for (auto *R : redecls()) if (R->isCompleteDefinition()) return R; @@ -4341,6 +4792,23 @@ void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) { } } +void TagDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const { + DeclarationName Name = getDeclName(); + // If the name is supposed to have an identifier but does not have one, 
then + // the tag is anonymous and we should print it differently. + if (Name.isIdentifier() && !Name.getAsIdentifierInfo()) { + // If the caller wanted to print a qualified name, they've already printed + // the scope. And if the caller doesn't want that, the scope information + // is already printed as part of the type. + PrintingPolicy Copy(Policy); + Copy.SuppressScope = true; + getASTContext().getTagDeclType(this).print(OS, Copy); + return; + } + // Otherwise, do the normal printing. + Name.print(OS, Policy); +} + void TagDecl::setTemplateParameterListsInfo( ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) { assert(!TPLists.empty()); @@ -4359,7 +4827,7 @@ void TagDecl::setTemplateParameterListsInfo( EnumDecl::EnumDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, EnumDecl *PrevDecl, bool Scoped, bool ScopedUsingClassTag, bool Fixed) - : TagDecl(Enum, TTK_Enum, C, DC, IdLoc, Id, PrevDecl, StartLoc) { + : TagDecl(Enum, TagTypeKind::Enum, C, DC, IdLoc, Id, PrevDecl, StartLoc) { assert(Scoped || !ScopedUsingClassTag); IntegerType = nullptr; setNumPositiveBits(0); @@ -4385,7 +4853,7 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC, return Enum; } -EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { EnumDecl *Enum = new (C, ID) EnumDecl(C, nullptr, SourceLocation(), SourceLocation(), nullptr, nullptr, false, false, false); @@ -4483,6 +4951,32 @@ unsigned EnumDecl::getODRHash() { return ODRHash; } +SourceRange EnumDecl::getSourceRange() const { + auto Res = TagDecl::getSourceRange(); + // Set end-point to enum-base, e.g. enum foo : ^bar + if (auto *TSI = getIntegerTypeSourceInfo()) { + // TagDecl doesn't know about the enum base. 
+ if (!getBraceRange().getEnd().isValid()) + Res.setEnd(TSI->getTypeLoc().getEndLoc()); + } + return Res; +} + +void EnumDecl::getValueRange(llvm::APInt &Max, llvm::APInt &Min) const { + unsigned Bitwidth = getASTContext().getIntWidth(getIntegerType()); + unsigned NumNegativeBits = getNumNegativeBits(); + unsigned NumPositiveBits = getNumPositiveBits(); + + if (NumNegativeBits) { + unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1); + Max = llvm::APInt(Bitwidth, 1) << (NumBits - 1); + Min = -Max; + } else { + Max = llvm::APInt(Bitwidth, 1) << NumPositiveBits; + Min = llvm::APInt::getZero(Bitwidth); + } +} + //===----------------------------------------------------------------------===// // RecordDecl Implementation //===----------------------------------------------------------------------===// @@ -4505,7 +4999,9 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C, setHasNonTrivialToPrimitiveDestructCUnion(false); setHasNonTrivialToPrimitiveCopyCUnion(false); setParamDestroyedInCallee(false); - setArgPassingRestrictions(APK_CanPassInRegs); + setArgPassingRestrictions(RecordArgPassingKind::CanPassInRegs); + setIsRandomized(false); + setODRHash(0); } RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC, @@ -4519,10 +5015,11 @@ RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC, return R; } -RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) { - RecordDecl *R = - new (C, ID) RecordDecl(Record, TTK_Struct, C, nullptr, SourceLocation(), - SourceLocation(), nullptr, nullptr); +RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, + GlobalDeclID ID) { + RecordDecl *R = new (C, ID) + RecordDecl(Record, TagTypeKind::Struct, C, nullptr, SourceLocation(), + SourceLocation(), nullptr, nullptr); R->setMayHaveOutOfDateDef(C.getLangOpts().Modules); return R; } @@ -4564,7 +5061,10 @@ bool RecordDecl::isOrContainsUnion() const { RecordDecl::field_iterator RecordDecl::field_begin() const { if (hasExternalLexicalStorage() && !hasLoadedFieldsFromExternalStorage()) LoadFieldsFromExternalStorage(); - + // This is necessary for correctness for C++ with modules. + // FIXME: Come up with a test case that breaks without definition. + if (RecordDecl *D = getDefinition(); D && D != this) + return D->field_begin(); return field_iterator(decl_iterator(FirstDecl)); } @@ -4578,7 +5078,13 @@ void RecordDecl::completeDefinition() { // Layouts are dumped when computed, so if we are dumping for all complete // types, we need to force usage to get types that wouldn't be used elsewhere. - if (Ctx.getLangOpts().DumpRecordLayoutsComplete) + // + // If the type is dependent, then we can't compute its layout because there + // is no way for us to know the size or alignment of a dependent type. Also + // ignore declarations marked as invalid since 'getASTRecordLayout()' asserts + // on that. 
+ if (Ctx.getLangOpts().DumpRecordLayoutsComplete && !isDependentType() && + !isInvalidDecl()) (void)Ctx.getASTRecordLayout(this); } @@ -4589,6 +5095,12 @@ bool RecordDecl::isMsStruct(const ASTContext &C) const { return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1; } +void RecordDecl::reorderDecls(const SmallVectorImpl<Decl *> &Decls) { + std::tie(FirstDecl, LastDecl) = DeclContext::BuildDeclChain(Decls, false); + LastDecl->NextInContextAndBits.setPointer(nullptr); + setIsRandomized(true); +} + void RecordDecl::LoadFieldsFromExternalStorage() const { ExternalASTSource *Source = getASTContext().getExternalSource(); assert(hasExternalLexicalStorage() && Source && "No external storage?"); @@ -4611,8 +5123,13 @@ void RecordDecl::LoadFieldsFromExternalStorage() const { if (Decls.empty()) return; - std::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls, - /*FieldsAlreadyLoaded=*/false); + auto [ExternalFirst, ExternalLast] = + BuildDeclChain(Decls, + /*FieldsAlreadyLoaded=*/false); + ExternalLast->NextInContextAndBits.setPointer(FirstDecl); + FirstDecl = ExternalFirst; + if (!LastDecl) + LastDecl = ExternalLast; } bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const { @@ -4674,6 +5191,19 @@ const FieldDecl *RecordDecl::findFirstNamedDataMember() const { return nullptr; } +unsigned RecordDecl::getODRHash() { + if (hasODRHash()) + return RecordDeclBits.ODRHash; + + // Only calculate hash on first call of getODRHash per record. + ODRHash Hash; + Hash.AddRecordDecl(this); + // For RecordDecl the ODRHash is stored in the remaining 26 + // bit of RecordDeclBits, adjust the hash to accomodate. + setODRHash(Hash.CalculateHash() >> 6); + return RecordDeclBits.ODRHash; +} + //===----------------------------------------------------------------------===// // BlockDecl Implementation //===----------------------------------------------------------------------===// @@ -4735,6 +5265,13 @@ TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) { return new (C, (DeclContext *)nullptr) TranslationUnitDecl(C); } +void TranslationUnitDecl::setAnonymousNamespace(NamespaceDecl *D) { + AnonymousNamespace = D; + + if (ASTMutationListener *Listener = Ctx.getASTMutationListener()) + Listener->AddedAnonymousNamespace(this, D); +} + void PragmaCommentDecl::anchor() {} PragmaCommentDecl *PragmaCommentDecl::Create(const ASTContext &C, @@ -4751,7 +5288,7 @@ PragmaCommentDecl *PragmaCommentDecl::Create(const ASTContext &C, } PragmaCommentDecl *PragmaCommentDecl::CreateDeserialized(ASTContext &C, - unsigned ID, + GlobalDeclID ID, unsigned ArgSize) { return new (C, ID, additionalSizeToAlloc<char>(ArgSize + 1)) PragmaCommentDecl(nullptr, SourceLocation(), PCK_Unknown); @@ -4776,7 +5313,7 @@ PragmaDetectMismatchDecl::Create(const ASTContext &C, TranslationUnitDecl *DC, } PragmaDetectMismatchDecl * -PragmaDetectMismatchDecl::CreateDeserialized(ASTContext &C, unsigned ID, +PragmaDetectMismatchDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, unsigned NameValueSize) { return new (C, ID, additionalSizeToAlloc<char>(NameValueSize + 1)) PragmaDetectMismatchDecl(nullptr, SourceLocation(), 0); @@ -4803,7 +5340,7 @@ LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC, return new (C, DC) LabelDecl(DC, IdentL, II, nullptr, GnuLabelL); } -LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) LabelDecl(nullptr, SourceLocation(), nullptr, nullptr, SourceLocation()); } @@ -4823,6 +5360,12 @@ 
bool ValueDecl::isWeak() const { MostRecent->hasAttr<WeakRefAttr>() || isWeakImported(); } +bool ValueDecl::isInitCapture() const { + if (auto *Var = llvm::dyn_cast<VarDecl>(this)) + return Var->isInitCapture(); + return false; +} + void ImplicitParamDecl::anchor() {} ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC, @@ -4838,37 +5381,35 @@ ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, QualType Type, } ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) ImplicitParamDecl(C, QualType(), ImplicitParamKind::Other); } -FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation StartLoc, - const DeclarationNameInfo &NameInfo, - QualType T, TypeSourceInfo *TInfo, - StorageClass SC, bool isInlineSpecified, - bool hasWrittenPrototype, - ConstexprSpecKind ConstexprKind, - Expr *TrailingRequiresClause) { - FunctionDecl *New = - new (C, DC) FunctionDecl(Function, C, DC, StartLoc, NameInfo, T, TInfo, - SC, isInlineSpecified, ConstexprKind, - TrailingRequiresClause); +FunctionDecl * +FunctionDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, + const DeclarationNameInfo &NameInfo, QualType T, + TypeSourceInfo *TInfo, StorageClass SC, bool UsesFPIntrin, + bool isInlineSpecified, bool hasWrittenPrototype, + ConstexprSpecKind ConstexprKind, + Expr *TrailingRequiresClause) { + FunctionDecl *New = new (C, DC) FunctionDecl( + Function, C, DC, StartLoc, NameInfo, T, TInfo, SC, UsesFPIntrin, + isInlineSpecified, ConstexprKind, TrailingRequiresClause); New->setHasWrittenPrototype(hasWrittenPrototype); return New; } -FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) FunctionDecl( Function, C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), - nullptr, SC_None, false, ConstexprSpecKind::Unspecified, nullptr); + nullptr, SC_None, false, false, ConstexprSpecKind::Unspecified, nullptr); } BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) { return new (C, DC) BlockDecl(DC, L); } -BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) BlockDecl(nullptr, SourceLocation()); } @@ -4882,7 +5423,7 @@ CapturedDecl *CapturedDecl::Create(ASTContext &C, DeclContext *DC, CapturedDecl(DC, NumParams); } -CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, unsigned ID, +CapturedDecl *CapturedDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, unsigned NumParams) { return new (C, ID, additionalSizeToAlloc<ImplicitParamDecl *>(NumParams)) CapturedDecl(nullptr, NumParams); @@ -4894,16 +5435,23 @@ void CapturedDecl::setBody(Stmt *B) { BodyAndNothrow.setPointer(B); } bool CapturedDecl::isNothrow() const { return BodyAndNothrow.getInt(); } void CapturedDecl::setNothrow(bool Nothrow) { BodyAndNothrow.setInt(Nothrow); } +EnumConstantDecl::EnumConstantDecl(const ASTContext &C, DeclContext *DC, + SourceLocation L, IdentifierInfo *Id, + QualType T, Expr *E, const llvm::APSInt &V) + : ValueDecl(EnumConstant, DC, L, Id, T), Init((Stmt *)E) { + setInitVal(C, V); +} + EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD, SourceLocation L, IdentifierInfo *Id, QualType T, Expr *E, const llvm::APSInt &V) { - return new (C, CD) EnumConstantDecl(CD, L, Id, T, E, V); + return new (C, 
CD) EnumConstantDecl(C, CD, L, Id, T, E, V); } -EnumConstantDecl * -EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) { - return new (C, ID) EnumConstantDecl(nullptr, SourceLocation(), nullptr, +EnumConstantDecl *EnumConstantDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { + return new (C, ID) EnumConstantDecl(C, nullptr, SourceLocation(), nullptr, QualType(), nullptr, llvm::APSInt()); } @@ -4923,15 +5471,16 @@ IndirectFieldDecl::IndirectFieldDecl(ASTContext &C, DeclContext *DC, IndirectFieldDecl * IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, - IdentifierInfo *Id, QualType T, + const IdentifierInfo *Id, QualType T, llvm::MutableArrayRef<NamedDecl *> CH) { return new (C, DC) IndirectFieldDecl(C, DC, L, Id, T, CH); } IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { - return new (C, ID) IndirectFieldDecl(C, nullptr, SourceLocation(), - DeclarationName(), QualType(), None); + GlobalDeclID ID) { + return new (C, ID) + IndirectFieldDecl(C, nullptr, SourceLocation(), DeclarationName(), + QualType(), std::nullopt); } SourceRange EnumConstantDecl::getSourceRange() const { @@ -4945,7 +5494,8 @@ void TypeDecl::anchor() {} TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, TypeSourceInfo *TInfo) { + const IdentifierInfo *Id, + TypeSourceInfo *TInfo) { return new (C, DC) TypedefDecl(C, DC, StartLoc, IdLoc, Id, TInfo); } @@ -4988,19 +5538,21 @@ bool TypedefNameDecl::isTransparentTagSlow() const { return isTransparent; } -TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) TypedefDecl(C, nullptr, SourceLocation(), SourceLocation(), nullptr, nullptr); } TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, + SourceLocation IdLoc, + const IdentifierInfo *Id, TypeSourceInfo *TInfo) { return new (C, DC) TypeAliasDecl(C, DC, StartLoc, IdLoc, Id, TInfo); } -TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) TypeAliasDecl(C, nullptr, SourceLocation(), SourceLocation(), nullptr, nullptr); } @@ -5031,21 +5583,84 @@ FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC, } FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) FileScopeAsmDecl(nullptr, nullptr, SourceLocation(), SourceLocation()); } +void TopLevelStmtDecl::anchor() {} + +TopLevelStmtDecl *TopLevelStmtDecl::Create(ASTContext &C, Stmt *Statement) { + assert(C.getLangOpts().IncrementalExtensions && + "Must be used only in incremental mode"); + + SourceLocation Loc = Statement ? 
Statement->getBeginLoc() : SourceLocation(); + DeclContext *DC = C.getTranslationUnitDecl(); + + return new (C, DC) TopLevelStmtDecl(DC, Loc, Statement); +} + +TopLevelStmtDecl *TopLevelStmtDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { + return new (C, ID) + TopLevelStmtDecl(/*DC=*/nullptr, SourceLocation(), /*S=*/nullptr); +} + +SourceRange TopLevelStmtDecl::getSourceRange() const { + return SourceRange(getLocation(), Statement->getEndLoc()); +} + +void TopLevelStmtDecl::setStmt(Stmt *S) { + assert(S); + Statement = S; + setLocation(Statement->getBeginLoc()); +} + void EmptyDecl::anchor() {} EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) { return new (C, DC) EmptyDecl(DC, L); } -EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) EmptyDecl(nullptr, SourceLocation()); } +HLSLBufferDecl::HLSLBufferDecl(DeclContext *DC, bool CBuffer, + SourceLocation KwLoc, IdentifierInfo *ID, + SourceLocation IDLoc, SourceLocation LBrace) + : NamedDecl(Decl::Kind::HLSLBuffer, DC, IDLoc, DeclarationName(ID)), + DeclContext(Decl::Kind::HLSLBuffer), LBraceLoc(LBrace), KwLoc(KwLoc), + IsCBuffer(CBuffer) {} + +HLSLBufferDecl *HLSLBufferDecl::Create(ASTContext &C, + DeclContext *LexicalParent, bool CBuffer, + SourceLocation KwLoc, IdentifierInfo *ID, + SourceLocation IDLoc, + SourceLocation LBrace) { + // For hlsl like this + // cbuffer A { + // cbuffer B { + // } + // } + // compiler should treat it as + // cbuffer A { + // } + // cbuffer B { + // } + // FIXME: support nested buffers if required for back-compat. + DeclContext *DC = LexicalParent; + HLSLBufferDecl *Result = + new (C, DC) HLSLBufferDecl(DC, CBuffer, KwLoc, ID, IDLoc, LBrace); + return Result; +} + +HLSLBufferDecl *HLSLBufferDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { + return new (C, ID) HLSLBufferDecl(nullptr, false, SourceLocation(), nullptr, + SourceLocation(), SourceLocation()); +} + //===----------------------------------------------------------------------===// // ImportDecl Implementation //===----------------------------------------------------------------------===// @@ -5097,7 +5712,7 @@ ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC, return Import; } -ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID, +ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, unsigned NumLocations) { return new (C, ID, additionalSizeToAlloc<SourceLocation>(NumLocations)) ImportDecl(EmptyShell()); @@ -5105,11 +5720,11 @@ ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID, ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const { if (!isImportComplete()) - return None; + return std::nullopt; const auto *StoredLocs = getTrailingObjects<SourceLocation>(); - return llvm::makeArrayRef(StoredLocs, - getNumModuleIdentifiers(getImportedModule())); + return llvm::ArrayRef(StoredLocs, + getNumModuleIdentifiers(getImportedModule())); } SourceRange ImportDecl::getSourceRange() const { @@ -5130,6 +5745,21 @@ ExportDecl *ExportDecl::Create(ASTContext &C, DeclContext *DC, return new (C, DC) ExportDecl(DC, ExportLoc); } -ExportDecl *ExportDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +ExportDecl *ExportDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) ExportDecl(nullptr, SourceLocation()); } + +bool clang::IsArmStreamingFunction(const FunctionDecl *FD, + bool 
IncludeLocallyStreaming) { + if (IncludeLocallyStreaming) + if (FD->hasAttr<ArmLocallyStreamingAttr>()) + return true; + + if (const Type *Ty = FD->getType().getTypePtrOrNull()) + if (const auto *FPT = Ty->getAs<FunctionProtoType>()) + if (FPT->getAArch64SMEAttributes() & + FunctionType::SME_PStateSMEnabledMask) + return true; + + return false; +} diff --git a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp index 3467da2b549e..c4e948a38e26 100644 --- a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp @@ -29,7 +29,7 @@ #include "clang/AST/Type.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" -#include "clang/Basic/LangOptions.h" +#include "clang/Basic/Module.h" #include "clang/Basic/ObjCRuntime.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceLocation.h" @@ -71,21 +71,20 @@ void Decl::updateOutOfDate(IdentifierInfo &II) const { #include "clang/AST/DeclNodes.inc" void *Decl::operator new(std::size_t Size, const ASTContext &Context, - unsigned ID, std::size_t Extra) { + GlobalDeclID ID, std::size_t Extra) { // Allocate an extra 8 bytes worth of storage, which ensures that the // resulting pointer will still be 8-byte aligned. - static_assert(sizeof(unsigned) * 2 >= alignof(Decl), - "Decl won't be misaligned"); + static_assert(sizeof(uint64_t) >= alignof(Decl), "Decl won't be misaligned"); void *Start = Context.Allocate(Size + Extra + 8); void *Result = (char*)Start + 8; - unsigned *PrefixPtr = (unsigned *)Result - 2; + uint64_t *PrefixPtr = (uint64_t *)Result - 1; - // Zero out the first 4 bytes; this is used to store the owning module ID. - PrefixPtr[0] = 0; + *PrefixPtr = ID.getRawValue(); - // Store the global declaration ID in the second 4 bytes. - PrefixPtr[1] = ID; + // We leave the upper 16 bits to store the module IDs. 48 bits should be + // sufficient to store a declaration ID. + assert(*PrefixPtr < llvm::maskTrailingOnes<uint64_t>(48)); return Result; } @@ -111,6 +110,29 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Ctx, return ::operator new(Size + Extra, Ctx); } +GlobalDeclID Decl::getGlobalID() const { + if (!isFromASTFile()) + return GlobalDeclID(); + // See the comments in `Decl::operator new` for details. 
+ uint64_t ID = *((const uint64_t *)this - 1); + return GlobalDeclID(ID & llvm::maskTrailingOnes<uint64_t>(48)); +} + +unsigned Decl::getOwningModuleID() const { + if (!isFromASTFile()) + return 0; + + uint64_t ID = *((const uint64_t *)this - 1); + return ID >> 48; +} + +void Decl::setOwningModuleID(unsigned ID) { + assert(isFromASTFile() && "Only works on a deserialized declaration"); + uint64_t *IDAddress = (uint64_t *)this - 1; + *IDAddress &= llvm::maskTrailingOnes<uint64_t>(48); + *IDAddress |= (uint64_t)ID << 48; +} + Module *Decl::getOwningModuleSlow() const { assert(isFromASTFile() && "Not from AST file?"); return getASTContext().getExternalSource()->getModule(getOwningModuleID()); @@ -152,6 +174,15 @@ void Decl::setInvalidDecl(bool Invalid) { } } +bool DeclContext::hasValidDeclKind() const { + switch (getDeclKind()) { +#define DECL(DERIVED, BASE) case Decl::DERIVED: return true; +#define ABSTRACT_DECL(DECL) +#include "clang/AST/DeclNodes.inc" + } + return false; +} + const char *DeclContext::getDeclKindName() const { switch (getDeclKind()) { #define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED; @@ -252,12 +283,12 @@ const TemplateParameterList *Decl::getDescribedTemplateParams() const { bool Decl::isTemplated() const { // A declaration is templated if it is a template or a template pattern, or - // is within (lexcially for a friend, semantically otherwise) a dependent - // context. - // FIXME: Should local extern declarations be treated like friends? + // is within (lexcially for a friend or local function declaration, + // semantically otherwise) a dependent context. if (auto *AsDC = dyn_cast<DeclContext>(this)) return AsDC->isDependentContext(); - auto *DC = getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext(); + auto *DC = getFriendObjectKind() || isLocalExternDecl() + ? getLexicalDeclContext() : getDeclContext(); return DC->isDependentContext() || isTemplateDecl() || getDescribedTemplateParams(); } @@ -283,10 +314,10 @@ unsigned Decl::getTemplateDepth() const { return cast<Decl>(DC)->getTemplateDepth(); } -const DeclContext *Decl::getParentFunctionOrMethod() const { - for (const DeclContext *DC = getDeclContext(); - DC && !DC->isTranslationUnit() && !DC->isNamespace(); - DC = DC->getParent()) +const DeclContext *Decl::getParentFunctionOrMethod(bool LexicalParent) const { + for (const DeclContext *DC = LexicalParent ? getLexicalDeclContext() + : getDeclContext(); + DC && !DC->isFileContext(); DC = DC->getParent()) if (DC->isFunctionOrMethod()) return DC; @@ -393,7 +424,85 @@ bool Decl::isInAnonymousNamespace() const { bool Decl::isInStdNamespace() const { const DeclContext *DC = getDeclContext(); - return DC && DC->isStdNamespace(); + return DC && DC->getNonTransparentContext()->isStdNamespace(); +} + +bool Decl::isFileContextDecl() const { + const auto *DC = dyn_cast<DeclContext>(this); + return DC && DC->isFileContext(); +} + +bool Decl::isFlexibleArrayMemberLike( + ASTContext &Ctx, const Decl *D, QualType Ty, + LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, + bool IgnoreTemplateOrMacroSubstitution) { + // For compatibility with existing code, we treat arrays of length 0 or + // 1 as flexible array members. + const auto *CAT = Ctx.getAsConstantArrayType(Ty); + if (CAT) { + using FAMKind = LangOptions::StrictFlexArraysLevelKind; + + llvm::APInt Size = CAT->getSize(); + if (StrictFlexArraysLevel == FAMKind::IncompleteOnly) + return false; + + // GCC extension, only allowed to represent a FAM. 
+ if (Size.isZero()) + return true; + + if (StrictFlexArraysLevel == FAMKind::ZeroOrIncomplete && Size.uge(1)) + return false; + + if (StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete && Size.uge(2)) + return false; + } else if (!Ctx.getAsIncompleteArrayType(Ty)) { + return false; + } + + if (const auto *OID = dyn_cast_if_present<ObjCIvarDecl>(D)) + return OID->getNextIvar() == nullptr; + + const auto *FD = dyn_cast_if_present<FieldDecl>(D); + if (!FD) + return false; + + if (CAT) { + // GCC treats an array memeber of a union as an FAM if the size is one or + // zero. + llvm::APInt Size = CAT->getSize(); + if (FD->getParent()->isUnion() && (Size.isZero() || Size.isOne())) + return true; + } + + // Don't consider sizes resulting from macro expansions or template argument + // substitution to form C89 tail-padded arrays. + if (IgnoreTemplateOrMacroSubstitution) { + TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); + while (TInfo) { + TypeLoc TL = TInfo->getTypeLoc(); + + // Look through typedefs. + if (TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>()) { + const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); + TInfo = TDL->getTypeSourceInfo(); + continue; + } + + if (auto CTL = TL.getAs<ConstantArrayTypeLoc>()) { + if (const Expr *SizeExpr = + dyn_cast_if_present<IntegerLiteral>(CTL.getSizeExpr()); + !SizeExpr || SizeExpr->getExprLoc().isMacroID()) + return false; + } + + break; + } + } + + // Test that the field is the last in the structure. + RecordDecl::field_iterator FI( + DeclContext::decl_iterator(const_cast<FieldDecl *>(FD))); + return ++FI == FD->getParent()->field_end(); } TranslationUnitDecl *Decl::getTranslationUnitDecl() { @@ -579,12 +688,29 @@ static AvailabilityResult CheckAvailability(ASTContext &Context, // Make sure that this declaration has already been introduced. if (!A->getIntroduced().empty() && EnclosingVersion < A->getIntroduced()) { - if (Message) { - Message->clear(); - llvm::raw_string_ostream Out(*Message); - VersionTuple VTI(A->getIntroduced()); - Out << "introduced in " << PrettyPlatformName << ' ' - << VTI << HintMessage; + IdentifierInfo *IIEnv = A->getEnvironment(); + StringRef TargetEnv = + Context.getTargetInfo().getTriple().getEnvironmentName(); + StringRef EnvName = llvm::Triple::getEnvironmentTypeName( + Context.getTargetInfo().getTriple().getEnvironment()); + // Matching environment or no environment on attribute + if (!IIEnv || (!TargetEnv.empty() && IIEnv->getName() == TargetEnv)) { + if (Message) { + Message->clear(); + llvm::raw_string_ostream Out(*Message); + VersionTuple VTI(A->getIntroduced()); + Out << "introduced in " << PrettyPlatformName << " " << VTI << " " + << EnvName << HintMessage; + } + } + // Non-matching environment or no environment on target + else { + if (Message) { + Message->clear(); + llvm::raw_string_ostream Out(*Message); + Out << "not available on " << PrettyPlatformName << " " << EnvName + << HintMessage; + } } return A->getStrict() ? 
AR_Unavailable : AR_NotYetIntroduced; @@ -749,6 +875,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) { case ObjCMethod: case ObjCProperty: case MSProperty: + case HLSLBuffer: return IDNS_Ordinary; case Label: return IDNS_Label; @@ -828,6 +955,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) { case LinkageSpec: case Export: case FileScopeAsm: + case TopLevelStmt: case StaticAssert: case ObjCPropertyImpl: case PragmaComment: @@ -838,13 +966,13 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) { case ExternCContext: case Decomposition: case MSGuid: + case UnnamedGlobalConstant: case TemplateParamObject: case UsingDirective: case BuiltinTemplate: case ClassTemplateSpecialization: case ClassTemplatePartialSpecialization: - case ClassScopeFunctionSpecialization: case VarTemplateSpecialization: case VarTemplatePartialSpecialization: case ObjCImplementation: @@ -858,6 +986,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) { case Empty: case LifetimeExtendedTemporary: case RequiresExprBody: + case ImplicitConceptSpecialization: // Never looked up by name. return 0; } @@ -912,20 +1041,14 @@ const AttrVec &Decl::getAttrs() const { Decl *Decl::castFromDeclContext (const DeclContext *D) { Decl::Kind DK = D->getDeclKind(); - switch(DK) { + switch (DK) { #define DECL(NAME, BASE) -#define DECL_CONTEXT(NAME) \ - case Decl::NAME: \ - return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D)); -#define DECL_CONTEXT_BASE(NAME) +#define DECL_CONTEXT(NAME) \ + case Decl::NAME: \ + return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D)); #include "clang/AST/DeclNodes.inc" - default: -#define DECL(NAME, BASE) -#define DECL_CONTEXT_BASE(NAME) \ - if (DK >= first##NAME && DK <= last##NAME) \ - return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D)); -#include "clang/AST/DeclNodes.inc" - llvm_unreachable("a decl that inherits DeclContext isn't handled"); + default: + llvm_unreachable("a decl that inherits DeclContext isn't handled"); } } @@ -933,18 +1056,12 @@ DeclContext *Decl::castToDeclContext(const Decl *D) { Decl::Kind DK = D->getKind(); switch(DK) { #define DECL(NAME, BASE) -#define DECL_CONTEXT(NAME) \ - case Decl::NAME: \ - return static_cast<NAME##Decl *>(const_cast<Decl *>(D)); -#define DECL_CONTEXT_BASE(NAME) -#include "clang/AST/DeclNodes.inc" - default: -#define DECL(NAME, BASE) -#define DECL_CONTEXT_BASE(NAME) \ - if (DK >= first##NAME && DK <= last##NAME) \ - return static_cast<NAME##Decl *>(const_cast<Decl *>(D)); +#define DECL_CONTEXT(NAME) \ + case Decl::NAME: \ + return static_cast<NAME##Decl *>(const_cast<Decl *>(D)); #include "clang/AST/DeclNodes.inc" - llvm_unreachable("a decl that inherits DeclContext isn't handled"); + default: + llvm_unreachable("a decl that inherits DeclContext isn't handled"); } } @@ -964,7 +1081,7 @@ SourceLocation Decl::getBodyRBrace() const { return {}; } -bool Decl::AccessDeclContextSanity() const { +bool Decl::AccessDeclContextCheck() const { #ifndef NDEBUG // Suppress this check if any of the following hold: // 1. this is the translation unit (and thus has no parent) @@ -984,9 +1101,7 @@ bool Decl::AccessDeclContextSanity() const { isa<ParmVarDecl>(this) || // FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have // AS_none as access specifier. 
- isa<CXXRecordDecl>(this) || - isa<ClassScopeFunctionSpecializationDecl>(this) || - isa<LifetimeExtendedTemporaryDecl>(this)) + isa<CXXRecordDecl>(this) || isa<LifetimeExtendedTemporaryDecl>(this)) return true; assert(Access != AS_none && @@ -995,6 +1110,65 @@ bool Decl::AccessDeclContextSanity() const { return true; } +bool Decl::isInExportDeclContext() const { + const DeclContext *DC = getLexicalDeclContext(); + + while (DC && !isa<ExportDecl>(DC)) + DC = DC->getLexicalParent(); + + return isa_and_nonnull<ExportDecl>(DC); +} + +bool Decl::isInAnotherModuleUnit() const { + auto *M = getOwningModule(); + + if (!M) + return false; + + // FIXME or NOTE: maybe we need to be clear about the semantics + // of clang header modules. e.g., if this lives in a clang header + // module included by the current unit, should we return false + // here? + // + // This is clear for header units as the specification says the + // header units live in a synthesised translation unit. So we + // can return false here. + M = M->getTopLevelModule(); + if (!M->isNamedModule()) + return false; + + return M != getASTContext().getCurrentNamedModule(); +} + +bool Decl::isInCurrentModuleUnit() const { + auto *M = getOwningModule(); + + if (!M || !M->isNamedModule()) + return false; + + return M == getASTContext().getCurrentNamedModule(); +} + +bool Decl::shouldEmitInExternalSource() const { + ExternalASTSource *Source = getASTContext().getExternalSource(); + if (!Source) + return false; + + return Source->hasExternalDefinitions(this) == ExternalASTSource::EK_Always; +} + +bool Decl::isFromExplicitGlobalModule() const { + return getOwningModule() && getOwningModule()->isExplicitGlobalModule(); +} + +bool Decl::isFromGlobalModule() const { + return getOwningModule() && getOwningModule()->isGlobalModule(); +} + +bool Decl::isInNamedModule() const { + return getOwningModule() && getOwningModule()->isNamedModule(); +} + static Decl::Kind getKind(const Decl *D) { return D->getKind(); } static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); } @@ -1004,7 +1178,9 @@ int64_t Decl::getID() const { const FunctionType *Decl::getFunctionType(bool BlocksToo) const { QualType Ty; - if (const auto *D = dyn_cast<ValueDecl>(this)) + if (isa<BindingDecl>(this)) + return nullptr; + else if (const auto *D = dyn_cast<ValueDecl>(this)) Ty = D->getType(); else if (const auto *D = dyn_cast<TypedefNameDecl>(this)) Ty = D->getUnderlyingType(); @@ -1021,6 +1197,23 @@ const FunctionType *Decl::getFunctionType(bool BlocksToo) const { return Ty->getAs<FunctionType>(); } +bool Decl::isFunctionPointerType() const { + QualType Ty; + if (const auto *D = dyn_cast<ValueDecl>(this)) + Ty = D->getType(); + else if (const auto *D = dyn_cast<TypedefNameDecl>(this)) + Ty = D->getUnderlyingType(); + else + return false; + + return Ty.getCanonicalType()->isFunctionPointerType(); +} + +DeclContext *Decl::getNonTransparentDeclContext() { + assert(getDeclContext()); + return getDeclContext()->getNonTransparentContext(); +} + /// Starting at a given context (a Decl or DeclContext), look for a /// code context that is not a closure (a lambda, block, etc.). 
template <class T> static Decl *getNonClosureContext(T *D) { @@ -1065,20 +1258,14 @@ DeclContext::DeclContext(Decl::Kind K) { } bool DeclContext::classof(const Decl *D) { - switch (D->getKind()) { + Decl::Kind DK = D->getKind(); + switch (DK) { #define DECL(NAME, BASE) #define DECL_CONTEXT(NAME) case Decl::NAME: -#define DECL_CONTEXT_BASE(NAME) #include "clang/AST/DeclNodes.inc" - return true; - default: -#define DECL(NAME, BASE) -#define DECL_CONTEXT_BASE(NAME) \ - if (D->getKind() >= Decl::first##NAME && \ - D->getKind() <= Decl::last##NAME) \ - return true; -#include "clang/AST/DeclNodes.inc" - return false; + return true; + default: + return false; } } @@ -1152,6 +1339,8 @@ bool DeclContext::isDependentContext() const { if (Record->isDependentLambda()) return true; + if (Record->isNeverDependentLambda()) + return false; } if (const auto *Function = dyn_cast<FunctionDecl>(this)) { @@ -1175,11 +1364,11 @@ bool DeclContext::isTransparentContext() const { if (getDeclKind() == Decl::Enum) return !cast<EnumDecl>(this)->isScoped(); - return getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export; + return isa<LinkageSpecDecl, ExportDecl, HLSLBufferDecl>(this); } static bool isLinkageSpecContext(const DeclContext *DC, - LinkageSpecDecl::LanguageIDs ID) { + LinkageSpecLanguageIDs ID) { while (DC->getDeclKind() != Decl::TranslationUnit) { if (DC->getDeclKind() == Decl::LinkageSpec) return cast<LinkageSpecDecl>(DC)->getLanguage() == ID; @@ -1189,14 +1378,14 @@ static bool isLinkageSpecContext(const DeclContext *DC, } bool DeclContext::isExternCContext() const { - return isLinkageSpecContext(this, LinkageSpecDecl::lang_c); + return isLinkageSpecContext(this, LinkageSpecLanguageIDs::C); } const LinkageSpecDecl *DeclContext::getExternCContext() const { const DeclContext *DC = this; while (DC->getDeclKind() != Decl::TranslationUnit) { if (DC->getDeclKind() == Decl::LinkageSpec && - cast<LinkageSpecDecl>(DC)->getLanguage() == LinkageSpecDecl::lang_c) + cast<LinkageSpecDecl>(DC)->getLanguage() == LinkageSpecLanguageIDs::C) return cast<LinkageSpecDecl>(DC); DC = DC->getLexicalParent(); } @@ -1204,7 +1393,7 @@ const LinkageSpecDecl *DeclContext::getExternCContext() const { } bool DeclContext::isExternCXXContext() const { - return isLinkageSpecContext(this, LinkageSpecDecl::lang_cxx); + return isLinkageSpecContext(this, LinkageSpecLanguageIDs::CXX); } bool DeclContext::Encloses(const DeclContext *DC) const { @@ -1212,16 +1401,27 @@ bool DeclContext::Encloses(const DeclContext *DC) const { return getPrimaryContext()->Encloses(DC); for (; DC; DC = DC->getParent()) - if (DC->getPrimaryContext() == this) + if (!isa<LinkageSpecDecl>(DC) && !isa<ExportDecl>(DC) && + DC->getPrimaryContext() == this) return true; return false; } +DeclContext *DeclContext::getNonTransparentContext() { + DeclContext *DC = this; + while (DC->isTransparentContext()) { + DC = DC->getParent(); + assert(DC && "All transparent contexts should have a parent!"); + } + return DC; +} + DeclContext *DeclContext::getPrimaryContext() { switch (getDeclKind()) { case Decl::ExternCContext: case Decl::LinkageSpec: case Decl::Export: + case Decl::TopLevelStmt: case Decl::Block: case Decl::Captured: case Decl::OMPDeclareReduction: @@ -1230,11 +1430,19 @@ DeclContext *DeclContext::getPrimaryContext() { // There is only one DeclContext for these entities. return this; + case Decl::HLSLBuffer: + // Each buffer, even with the same name, is a distinct construct. 
+ // Multiple buffers with the same name are allowed for backward + // compatibility. + // As long as buffers have unique resource bindings the names don't matter. + // The names get exposed via the CPU-side reflection API which + // supports querying bindings, so we cannot remove them. + return this; + case Decl::TranslationUnit: return static_cast<TranslationUnitDecl *>(this)->getFirstDecl(); case Decl::Namespace: - // The original namespace is our primary context. - return static_cast<NamespaceDecl *>(this)->getOriginalNamespace(); + return static_cast<NamespaceDecl *>(this)->getFirstDecl(); case Decl::ObjCMethod: return this; @@ -1515,7 +1723,11 @@ void DeclContext::removeDecl(Decl *D) { if (Map) { StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName()); assert(Pos != Map->end() && "no lookup entry for decl"); - Pos->second.remove(ND); + StoredDeclsList &List = Pos->second; + List.remove(ND); + // Clean up the entry if there are no more decls. + if (List.isNull()) + Map->erase(Pos); } } while (DC->isTransparentContext() && (DC = DC->getParent())); } @@ -1634,9 +1846,9 @@ void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) { DeclContext::lookup_result DeclContext::lookup(DeclarationName Name) const { - assert(getDeclKind() != Decl::LinkageSpec && - getDeclKind() != Decl::Export && - "should not perform lookups into transparent contexts"); + // For transparent DeclContext, we should lookup in their enclosing context. + if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export) + return getParent()->lookup(Name); const DeclContext *PrimaryContext = getPrimaryContext(); if (PrimaryContext != this) @@ -1699,9 +1911,9 @@ DeclContext::lookup(DeclarationName Name) const { DeclContext::lookup_result DeclContext::noload_lookup(DeclarationName Name) { - assert(getDeclKind() != Decl::LinkageSpec && - getDeclKind() != Decl::Export && - "should not perform lookups into transparent contexts"); + // For transparent DeclContext, we should lookup in their enclosing context. + if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export) + return getParent()->noload_lookup(Name); DeclContext *PrimaryContext = getPrimaryContext(); if (PrimaryContext != this) @@ -1739,7 +1951,8 @@ void DeclContext::localUncachedLookup(DeclarationName Name, if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage() && Name) { lookup_result LookupResults = lookup(Name); Results.insert(Results.end(), LookupResults.begin(), LookupResults.end()); - return; + if (!Results.empty()) + return; } // If we have a lookup table, check there first. Maybe we'll get lucky. @@ -1953,6 +2166,7 @@ void ASTContext::ReleaseDeclContextMaps() { // pointer because the subclass doesn't add anything that needs to // be deleted. 
StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt()); + LastSDM.setPointer(nullptr); } void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) { @@ -1995,3 +2209,7 @@ DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C, return DD; } + +unsigned DeclIDBase::getLocalDeclIndex() const { + return ID & llvm::maskTrailingOnes<DeclID>(32); +} diff --git a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp index aeee35d9c74f..9a3ede426e91 100644 --- a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp @@ -36,7 +36,7 @@ #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" -#include "llvm/ADT/None.h" +#include "clang/Basic/TargetInfo.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator_range.h" @@ -57,7 +57,8 @@ using namespace clang; void AccessSpecDecl::anchor() {} -AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) AccessSpecDecl(EmptyShell()); } @@ -67,8 +68,8 @@ void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const { assert(Source && "getFromExternalSource with no external source"); for (ASTUnresolvedSet::iterator I = Impl.begin(); I != Impl.end(); ++I) - I.setDecl(cast<NamedDecl>(Source->GetExternalDecl( - reinterpret_cast<uintptr_t>(I.getDecl()) >> 2))); + I.setDecl( + cast<NamedDecl>(Source->GetExternalDecl(GlobalDeclID(I.getDeclID())))); Impl.Decls.setLazy(false); } @@ -79,10 +80,9 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D) HasBasesWithFields(false), HasBasesWithNonStaticDataMembers(false), HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false), HasMutableFields(false), HasVariantMembers(false), - HasOnlyCMembers(true), HasInClassInitializer(false), + HasOnlyCMembers(true), HasInitMethod(false), HasInClassInitializer(false), HasUninitializedReferenceMember(false), HasUninitializedFields(false), - HasInheritedConstructor(false), - HasInheritedDefaultConstructor(false), + HasInheritedConstructor(false), HasInheritedDefaultConstructor(false), HasInheritedAssignment(false), NeedOverloadResolutionForCopyConstructor(false), NeedOverloadResolutionForMoveConstructor(false), @@ -147,25 +147,25 @@ CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK, CXXRecordDecl * CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC, TypeSourceInfo *Info, SourceLocation Loc, - bool Dependent, bool IsGeneric, + unsigned DependencyKind, bool IsGeneric, LambdaCaptureDefault CaptureDefault) { - auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc, - nullptr, nullptr); + auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TagTypeKind::Class, C, DC, Loc, + Loc, nullptr, nullptr); R->setBeingDefined(true); - R->DefinitionData = - new (C) struct LambdaDefinitionData(R, Info, Dependent, IsGeneric, - CaptureDefault); + R->DefinitionData = new (C) struct LambdaDefinitionData( + R, Info, DependencyKind, IsGeneric, CaptureDefault); R->setMayHaveOutOfDateDef(false); R->setImplicit(true); + C.getTypeDeclType(R, /*PrevDecl=*/nullptr); return R; } -CXXRecordDecl * -CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) { - auto *R = new (C, ID) CXXRecordDecl( - CXXRecord, TTK_Struct, C, nullptr, SourceLocation(), SourceLocation(), - nullptr, 
nullptr); +CXXRecordDecl *CXXRecordDecl::CreateDeserialized(const ASTContext &C, + GlobalDeclID ID) { + auto *R = new (C, ID) + CXXRecordDecl(CXXRecord, TagTypeKind::Struct, C, nullptr, + SourceLocation(), SourceLocation(), nullptr, nullptr); R->setMayHaveOutOfDateDef(false); return R; } @@ -178,6 +178,8 @@ static bool hasRepeatedBaseClass(const CXXRecordDecl *StartRD) { SmallVector<const CXXRecordDecl*, 8> WorkList = {StartRD}; while (!WorkList.empty()) { const CXXRecordDecl *RD = WorkList.pop_back_val(); + if (RD->getTypeForDecl()->isDependentType()) + continue; for (const CXXBaseSpecifier &BaseSpec : RD->bases()) { if (const CXXRecordDecl *B = BaseSpec.getType()->getAsCXXRecordDecl()) { if (!SeenBaseTypes.insert(B).second) @@ -399,10 +401,11 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases, // C++11 [class.ctor]p6: // If that user-written default constructor would satisfy the - // requirements of a constexpr constructor, the implicitly-defined - // default constructor is constexpr. + // requirements of a constexpr constructor/function(C++23), the + // implicitly-defined default constructor is constexpr. if (!BaseClassDecl->hasConstexprDefaultConstructor()) - data().DefaultedDefaultConstructorIsConstexpr = false; + data().DefaultedDefaultConstructorIsConstexpr = + C.getLangOpts().CPlusPlus23; // C++1z [class.copy]p8: // The implicitly-declared copy constructor for a class X will have @@ -445,8 +448,8 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases, setHasVolatileMember(true); if (BaseClassDecl->getArgPassingRestrictions() == - RecordDecl::APK_CanNeverPassInRegs) - setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs); + RecordArgPassingKind::CanNeverPassInRegs) + setArgPassingRestrictions(RecordArgPassingKind::CanNeverPassInRegs); // Keep track of the presence of mutable fields. if (BaseClassDecl->hasMutableFields()) @@ -547,7 +550,8 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) { // -- for every subobject of class type or (possibly multi-dimensional) // array thereof, that class type shall have a constexpr destructor if (!Subobj->hasConstexprDestructor()) - data().DefaultedDestructorIsConstexpr = false; + data().DefaultedDestructorIsConstexpr = + getASTContext().getLangOpts().CPlusPlus23; // C++20 [temp.param]p7: // A structural type is [...] 
a literal class type [for which] the types @@ -557,6 +561,42 @@ void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) { data().StructuralIfLiteral = false; } +const CXXRecordDecl *CXXRecordDecl::getStandardLayoutBaseWithFields() const { + assert( + isStandardLayout() && + "getStandardLayoutBaseWithFields called on a non-standard-layout type"); +#ifdef EXPENSIVE_CHECKS + { + unsigned NumberOfBasesWithFields = 0; + if (!field_empty()) + ++NumberOfBasesWithFields; + llvm::SmallPtrSet<const CXXRecordDecl *, 8> UniqueBases; + forallBases([&](const CXXRecordDecl *Base) -> bool { + if (!Base->field_empty()) + ++NumberOfBasesWithFields; + assert( + UniqueBases.insert(Base->getCanonicalDecl()).second && + "Standard layout struct has multiple base classes of the same type"); + return true; + }); + assert(NumberOfBasesWithFields <= 1 && + "Standard layout struct has fields declared in more than one class"); + } +#endif + if (!field_empty()) + return this; + const CXXRecordDecl *Result = this; + forallBases([&](const CXXRecordDecl *Base) -> bool { + if (!Base->field_empty()) { + // This is the base where the fields are declared; return early + Result = Base; + return false; + } + return true; + }); + return Result; +} + bool CXXRecordDecl::hasConstexprDestructor() const { auto *Dtor = getDestructor(); return Dtor ? Dtor->isConstexpr() : defaultedDestructorIsConstexpr(); @@ -586,6 +626,19 @@ bool CXXRecordDecl::isTriviallyCopyable() const { return true; } +bool CXXRecordDecl::isTriviallyCopyConstructible() const { + + // A trivially copy constructible class is a class that: + // -- has no non-trivial copy constructors, + if (hasNonTrivialCopyConstructor()) + return false; + // -- has a trivial destructor. + if (!hasTrivialDestructor()) + return false; + + return true; +} + void CXXRecordDecl::markedVirtualFunctionPure() { // C++ [class.abstract]p2: // A class is abstract if it has at least one pure virtual function. @@ -652,12 +705,15 @@ bool CXXRecordDecl::hasSubobjectAtOffsetZeroOfEmptyBaseType( for (auto *FD : X->fields()) { // FIXME: Should we really care about the type of the first non-static // data member of a non-union if there are preceding unnamed bit-fields? - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) continue; if (!IsFirstField && !FD->isZeroSize(Ctx)) continue; + if (FD->isInvalidDecl()) + continue; + // -- If X is n array type, [visit the element type] QualType T = Ctx.getBaseElementType(FD->getType()); if (auto *RD = T->getAsCXXRecordDecl()) @@ -685,17 +741,16 @@ bool CXXRecordDecl::lambdaIsDefaultConstructibleAndAssignable() const { // C++17 [expr.prim.lambda]p21: // The closure type associated with a lambda-expression has no default // constructor and a deleted copy assignment operator. - if (getLambdaCaptureDefault() != LCD_None || capture_size() != 0) + if (!isCapturelessLambda()) return false; return getASTContext().getLangOpts().CPlusPlus20; } void CXXRecordDecl::addedMember(Decl *D) { - if (!D->isImplicit() && - !isa<FieldDecl>(D) && - !isa<IndirectFieldDecl>(D) && - (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class || - cast<TagDecl>(D)->getTagKind() == TTK_Interface)) + if (!D->isImplicit() && !isa<FieldDecl>(D) && !isa<IndirectFieldDecl>(D) && + (!isa<TagDecl>(D) || + cast<TagDecl>(D)->getTagKind() == TagTypeKind::Class || + cast<TagDecl>(D)->getTagKind() == TagTypeKind::Interface)) data().HasOnlyCMembers = false; // Ignore friends and invalid declarations. 
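A short illustration of the invariant the new CXXRecordDecl::getStandardLayoutBaseWithFields() relies on (plain standard C++, not Clang internals; the type names Empty, Holder and Derived are invented for this sketch): in a standard-layout class, every non-static data member of the hierarchy is first declared in exactly one class, so there is a unique base, possibly the class itself, that holds all the fields.

#include <type_traits>

struct Empty {};                    // contributes no fields
struct Holder { int a; int b; };    // every field of the hierarchy lives here
struct Derived : Empty, Holder {};  // declares no fields of its own

// Standard layout is preserved because all fields are first declared in a
// single class (Holder).
static_assert(std::is_standard_layout_v<Derived>,
              "Derived is standard-layout");

// For the CXXRecordDecl of Derived, getStandardLayoutBaseWithFields() would
// walk the bases and return the declaration of Holder; for Holder itself it
// simply returns Holder.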
@@ -767,12 +822,16 @@ void CXXRecordDecl::addedMember(Decl *D) { // Note that we have a user-declared constructor. data().UserDeclaredConstructor = true; - // C++ [class]p4: - // A POD-struct is an aggregate class [...] - // Since the POD bit is meant to be C++03 POD-ness, clear it even if - // the type is technically an aggregate in C++0x since it wouldn't be - // in 03. - data().PlainOldData = false; + const TargetInfo &TI = getASTContext().getTargetInfo(); + if ((!Constructor->isDeleted() && !Constructor->isDefaulted()) || + !TI.areDefaultedSMFStillPOD(getLangOpts())) { + // C++ [class]p4: + // A POD-struct is an aggregate class [...] + // Since the POD bit is meant to be C++03 POD-ness, clear it even if + // the type is technically an aggregate in C++0x since it wouldn't be + // in 03. + data().PlainOldData = false; + } } if (Constructor->isDefaultConstructor()) { @@ -824,34 +883,16 @@ void CXXRecordDecl::addedMember(Decl *D) { data().HasInheritedDefaultConstructor = true; } - // Handle destructors. - if (const auto *DD = dyn_cast<CXXDestructorDecl>(D)) { - SMKind |= SMF_Destructor; - - if (DD->isUserProvided()) - data().HasIrrelevantDestructor = false; - // If the destructor is explicitly defaulted and not trivial or not public - // or if the destructor is deleted, we clear HasIrrelevantDestructor in - // finishedDefaultedOrDeletedMember. - - // C++11 [class.dtor]p5: - // A destructor is trivial if [...] the destructor is not virtual. - if (DD->isVirtual()) { - data().HasTrivialSpecialMembers &= ~SMF_Destructor; - data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor; - } - - if (DD->isNoReturn()) - data().IsAnyDestructorNoReturn = true; - } - // Handle member functions. if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) { + if (isa<CXXDestructorDecl>(D)) + SMKind |= SMF_Destructor; + if (Method->isCopyAssignmentOperator()) { SMKind |= SMF_CopyAssignment; const auto *ParamTy = - Method->getParamDecl(0)->getType()->getAs<ReferenceType>(); + Method->getNonObjectParameter(0)->getType()->getAs<ReferenceType>(); if (!ParamTy || ParamTy->getPointeeType().isConstQualified()) data().HasDeclaredCopyAssignmentWithConstParam = true; } @@ -892,46 +933,38 @@ void CXXRecordDecl::addedMember(Decl *D) { data().HasTrivialSpecialMembersForCall &= data().DeclaredSpecialMembers | ~SMKind; - if (!Method->isImplicit() && !Method->isUserProvided()) { - // This method is user-declared but not user-provided. We can't work out - // whether it's trivial yet (not until we get to the end of the class). - // We'll handle this method in finishedDefaultedOrDeletedMember. - } else if (Method->isTrivial()) { - data().HasTrivialSpecialMembers |= SMKind; - data().HasTrivialSpecialMembersForCall |= SMKind; - } else if (Method->isTrivialForCall()) { - data().HasTrivialSpecialMembersForCall |= SMKind; - data().DeclaredNonTrivialSpecialMembers |= SMKind; - } else { - data().DeclaredNonTrivialSpecialMembers |= SMKind; - // If this is a user-provided function, do not set - // DeclaredNonTrivialSpecialMembersForCall here since we don't know - // yet whether the method would be considered non-trivial for the - // purpose of calls (attribute "trivial_abi" can be dropped from the - // class later, which can change the special method's triviality). - if (!Method->isUserProvided()) - data().DeclaredNonTrivialSpecialMembersForCall |= SMKind; - } - // Note when we have declared a declared special member, and suppress the // implicit declaration of this special member. 
data().DeclaredSpecialMembers |= SMKind; - if (!Method->isImplicit()) { data().UserDeclaredSpecialMembers |= SMKind; - // C++03 [class]p4: - // A POD-struct is an aggregate class that has [...] no user-defined - // copy assignment operator and no user-defined destructor. - // - // Since the POD bit is meant to be C++03 POD-ness, and in C++03, - // aggregates could not have any constructors, clear it even for an - // explicitly defaulted or deleted constructor. - // type is technically an aggregate in C++0x since it wouldn't be in 03. - // - // Also, a user-declared move assignment operator makes a class non-POD. - // This is an extension in C++03. - data().PlainOldData = false; + const TargetInfo &TI = getASTContext().getTargetInfo(); + if ((!Method->isDeleted() && !Method->isDefaulted() && + SMKind != SMF_MoveAssignment) || + !TI.areDefaultedSMFStillPOD(getLangOpts())) { + // C++03 [class]p4: + // A POD-struct is an aggregate class that has [...] no user-defined + // copy assignment operator and no user-defined destructor. + // + // Since the POD bit is meant to be C++03 POD-ness, and in C++03, + // aggregates could not have any constructors, clear it even for an + // explicitly defaulted or deleted constructor. + // type is technically an aggregate in C++0x since it wouldn't be in + // 03. + // + // Also, a user-declared move assignment operator makes a class + // non-POD. This is an extension in C++03. + data().PlainOldData = false; + } + } + // When instantiating a class, we delay updating the destructor and + // triviality properties of the class until selecting a destructor and + // computing the eligibility of its special member functions. This is + // because there might be function constraints that we need to evaluate + // and compare later in the instantiation. + if (!Method->isIneligibleOrNotSelected()) { + addedEligibleSpecialMemberFunction(Method, SMKind); } } @@ -954,7 +987,7 @@ void CXXRecordDecl::addedMember(Decl *D) { // A declaration for a bit-field that omits the identifier declares an // unnamed bit-field. Unnamed bit-fields are not members and cannot be // initialized. - if (Field->isUnnamedBitfield()) { + if (Field->isUnnamedBitField()) { // C++ [meta.unary.prop]p4: [LWG2358] // T is a class type [...] with [...] no unnamed bit-fields of non-zero // length @@ -1053,7 +1086,7 @@ void CXXRecordDecl::addedMember(Decl *D) { // Structs with __weak fields should never be passed directly. if (LT == Qualifiers::OCL_Weak) - setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs); + setArgPassingRestrictions(RecordArgPassingKind::CanNeverPassInRegs); Data.HasIrrelevantDestructor = false; @@ -1247,8 +1280,8 @@ void CXXRecordDecl::addedMember(Decl *D) { if (FieldRec->hasVolatileMember()) setHasVolatileMember(true); if (FieldRec->getArgPassingRestrictions() == - RecordDecl::APK_CanNeverPassInRegs) - setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs); + RecordArgPassingKind::CanNeverPassInRegs) + setArgPassingRestrictions(RecordArgPassingKind::CanNeverPassInRegs); // C++0x [class]p7: // A standard-layout class is a class that: @@ -1306,7 +1339,8 @@ void CXXRecordDecl::addedMember(Decl *D) { !FieldRec->hasConstexprDefaultConstructor() && !isUnion()) // The standard requires any in-class initializer to be a constant // expression. We consider this to be a defect. 
- data().DefaultedDefaultConstructorIsConstexpr = false; + data().DefaultedDefaultConstructorIsConstexpr = + Context.getLangOpts().CPlusPlus23; // C++11 [class.copy]p8: // The implicitly-declared copy constructor for a class X will have @@ -1392,6 +1426,83 @@ void CXXRecordDecl::addedMember(Decl *D) { } } +bool CXXRecordDecl::isLiteral() const { + const LangOptions &LangOpts = getLangOpts(); + if (!(LangOpts.CPlusPlus20 ? hasConstexprDestructor() + : hasTrivialDestructor())) + return false; + + if (hasNonLiteralTypeFieldsOrBases()) { + // CWG2598 + // is an aggregate union type that has either no variant + // members or at least one variant member of non-volatile literal type, + if (!isUnion()) + return false; + bool HasAtLeastOneLiteralMember = + fields().empty() || any_of(fields(), [this](const FieldDecl *D) { + return !D->getType().isVolatileQualified() && + D->getType()->isLiteralType(getASTContext()); + }); + if (!HasAtLeastOneLiteralMember) + return false; + } + + return isAggregate() || (isLambda() && LangOpts.CPlusPlus17) || + hasConstexprNonCopyMoveConstructor() || hasTrivialDefaultConstructor(); +} + +void CXXRecordDecl::addedSelectedDestructor(CXXDestructorDecl *DD) { + DD->setIneligibleOrNotSelected(false); + addedEligibleSpecialMemberFunction(DD, SMF_Destructor); +} + +void CXXRecordDecl::addedEligibleSpecialMemberFunction(const CXXMethodDecl *MD, + unsigned SMKind) { + // FIXME: We shouldn't change DeclaredNonTrivialSpecialMembers if `MD` is + // a function template, but this needs CWG attention before we break ABI. + // See https://github.com/llvm/llvm-project/issues/59206 + + if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) { + if (DD->isUserProvided()) + data().HasIrrelevantDestructor = false; + // If the destructor is explicitly defaulted and not trivial or not public + // or if the destructor is deleted, we clear HasIrrelevantDestructor in + // finishedDefaultedOrDeletedMember. + + // C++11 [class.dtor]p5: + // A destructor is trivial if [...] the destructor is not virtual. + if (DD->isVirtual()) { + data().HasTrivialSpecialMembers &= ~SMF_Destructor; + data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor; + } + + if (DD->isNoReturn()) + data().IsAnyDestructorNoReturn = true; + } + + if (!MD->isImplicit() && !MD->isUserProvided()) { + // This method is user-declared but not user-provided. We can't work + // out whether it's trivial yet (not until we get to the end of the + // class). We'll handle this method in + // finishedDefaultedOrDeletedMember. + } else if (MD->isTrivial()) { + data().HasTrivialSpecialMembers |= SMKind; + data().HasTrivialSpecialMembersForCall |= SMKind; + } else if (MD->isTrivialForCall()) { + data().HasTrivialSpecialMembersForCall |= SMKind; + data().DeclaredNonTrivialSpecialMembers |= SMKind; + } else { + data().DeclaredNonTrivialSpecialMembers |= SMKind; + // If this is a user-provided function, do not set + // DeclaredNonTrivialSpecialMembersForCall here since we don't know + // yet whether the method would be considered non-trivial for the + // purpose of calls (attribute "trivial_abi" can be dropped from the + // class later, which can change the special method's triviality). 
+ if (!MD->isUserProvided()) + data().DeclaredNonTrivialSpecialMembersForCall |= SMKind; + } +} + void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) { assert(!D->isImplicit() && !D->isUserProvided()); @@ -1422,10 +1533,21 @@ void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) { // Update which trivial / non-trivial special members we have. // addedMember will have skipped this step for this member. - if (D->isTrivial()) - data().HasTrivialSpecialMembers |= SMKind; - else - data().DeclaredNonTrivialSpecialMembers |= SMKind; + if (!D->isIneligibleOrNotSelected()) { + if (D->isTrivial()) + data().HasTrivialSpecialMembers |= SMKind; + else + data().DeclaredNonTrivialSpecialMembers |= SMKind; + } +} + +void CXXRecordDecl::LambdaDefinitionData::AddCaptureList(ASTContext &Ctx, + Capture *CaptureList) { + Captures.push_back(CaptureList); + if (Captures.size() == 2) { + // The TinyPtrVector member now needs destruction. + Ctx.addDestruction(&Captures); + } } void CXXRecordDecl::setCaptures(ASTContext &Context, @@ -1435,14 +1557,15 @@ void CXXRecordDecl::setCaptures(ASTContext &Context, // Copy captures. Data.NumCaptures = Captures.size(); Data.NumExplicitCaptures = 0; - Data.Captures = (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) * - Captures.size()); - LambdaCapture *ToCapture = Data.Captures; - for (unsigned I = 0, N = Captures.size(); I != N; ++I) { - if (Captures[I].isExplicit()) + auto *ToCapture = (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) * + Captures.size()); + Data.AddCaptureList(Context, ToCapture); + for (const LambdaCapture &C : Captures) { + if (C.isExplicit()) ++Data.NumExplicitCaptures; - *ToCapture++ = Captures[I]; + new (ToCapture) LambdaCapture(C); + ToCapture++; } if (!lambdaIsDefaultConstructibleAndAssignable()) @@ -1467,7 +1590,8 @@ void CXXRecordDecl::setTrivialForCallFlags(CXXMethodDecl *D) { } bool CXXRecordDecl::isCLike() const { - if (getTagKind() == TTK_Class || getTagKind() == TTK_Interface || + if (getTagKind() == TagTypeKind::Class || + getTagKind() == TagTypeKind::Interface || !TemplateOrInstantiation.isNull()) return false; if (!hasDefinition()) @@ -1483,10 +1607,9 @@ bool CXXRecordDecl::isGenericLambda() const { #ifndef NDEBUG static bool allLookupResultsAreTheSame(const DeclContext::lookup_result &R) { - for (auto *D : R) - if (!declaresSameEntity(D, R.front())) - return false; - return true; + return llvm::all_of(R, [&](NamedDecl *D) { + return D->isInvalidDecl() || declaresSameEntity(D, R.front()); + }); } #endif @@ -1555,21 +1678,23 @@ CXXMethodDecl *CXXRecordDecl::getLambdaStaticInvoker(CallingConv CC) const { } void CXXRecordDecl::getCaptureFields( - llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures, - FieldDecl *&ThisCapture) const { + llvm::DenseMap<const ValueDecl *, FieldDecl *> &Captures, + FieldDecl *&ThisCapture) const { Captures.clear(); ThisCapture = nullptr; LambdaDefinitionData &Lambda = getLambdaData(); - RecordDecl::field_iterator Field = field_begin(); - for (const LambdaCapture *C = Lambda.Captures, *CEnd = C + Lambda.NumCaptures; - C != CEnd; ++C, ++Field) { - if (C->capturesThis()) - ThisCapture = *Field; - else if (C->capturesVariable()) - Captures[C->getCapturedVar()] = *Field; + for (const LambdaCapture *List : Lambda.Captures) { + RecordDecl::field_iterator Field = field_begin(); + for (const LambdaCapture *C = List, *CEnd = C + Lambda.NumCaptures; + C != CEnd; ++C, ++Field) { + if (C->capturesThis()) + ThisCapture = *Field; + else if (C->capturesVariable()) + 
Captures[C->getCapturedVar()] = *Field; + } + assert(Field == field_end()); } - assert(Field == field_end()); } TemplateParameterList * @@ -1593,7 +1718,7 @@ CXXRecordDecl::getLambdaExplicitTemplateParameters() const { const auto ExplicitEnd = llvm::partition_point( *List, [](const NamedDecl *D) { return !D->isImplicit(); }); - return llvm::makeArrayRef(List->begin(), ExplicitEnd); + return llvm::ArrayRef(List->begin(), ExplicitEnd); } Decl *CXXRecordDecl::getLambdaContextDecl() const { @@ -1602,18 +1727,20 @@ Decl *CXXRecordDecl::getLambdaContextDecl() const { return getLambdaData().ContextDecl.get(Source); } -void CXXRecordDecl::setDeviceLambdaManglingNumber(unsigned Num) const { +void CXXRecordDecl::setLambdaNumbering(LambdaNumbering Numbering) { assert(isLambda() && "Not a lambda closure type!"); - if (Num) - getASTContext().DeviceLambdaManglingNumbers[this] = Num; + getLambdaData().ManglingNumber = Numbering.ManglingNumber; + if (Numbering.DeviceManglingNumber) + getASTContext().DeviceLambdaManglingNumbers[this] = + Numbering.DeviceManglingNumber; + getLambdaData().IndexInContext = Numbering.IndexInContext; + getLambdaData().ContextDecl = Numbering.ContextDecl; + getLambdaData().HasKnownInternalLinkage = Numbering.HasKnownInternalLinkage; } unsigned CXXRecordDecl::getDeviceLambdaManglingNumber() const { assert(isLambda() && "Not a lambda closure type!"); - auto I = getASTContext().DeviceLambdaManglingNumbers.find(this); - if (I != getASTContext().DeviceLambdaManglingNumbers.end()) - return I->second; - return 0; + return getASTContext().DeviceLambdaManglingNumbers.lookup(this); } static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) { @@ -1776,7 +1903,7 @@ void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) { for (unsigned I = 0, E = Convs.size(); I != E; ++I) { if (Convs[I].getDecl() == ConvDecl) { Convs.erase(I); - assert(llvm::find(Convs, ConvDecl) == Convs.end() && + assert(!llvm::is_contained(Convs, ConvDecl) && "conversion was found multiple times in unresolved set"); return; } @@ -1894,7 +2021,14 @@ CXXDestructorDecl *CXXRecordDecl::getDestructor() const { DeclContext::lookup_result R = lookup(Name); - return R.empty() ? nullptr : dyn_cast<CXXDestructorDecl>(R.front()); + // If a destructor was marked as not selected, we skip it. We don't always + // have a selected destructor: dependent types, unnamed structs. 
+ for (auto *Decl : R) { + auto* DD = dyn_cast<CXXDestructorDecl>(Decl); + if (DD && !DD->isIneligibleOrNotSelected()) + return DD; + } + return nullptr; } static bool isDeclContextInNamespace(const DeclContext *DC) { @@ -1961,40 +2095,39 @@ void CXXRecordDecl::completeDefinition() { completeDefinition(nullptr); } +static bool hasPureVirtualFinalOverrider( + const CXXRecordDecl &RD, const CXXFinalOverriderMap *FinalOverriders) { + if (!FinalOverriders) { + CXXFinalOverriderMap MyFinalOverriders; + RD.getFinalOverriders(MyFinalOverriders); + return hasPureVirtualFinalOverrider(RD, &MyFinalOverriders); + } + + for (const CXXFinalOverriderMap::value_type & + OverridingMethodsEntry : *FinalOverriders) { + for (const auto &[_, SubobjOverrides] : OverridingMethodsEntry.second) { + assert(SubobjOverrides.size() > 0 && + "All virtual functions have overriding virtual functions"); + + if (SubobjOverrides.front().Method->isPureVirtual()) + return true; + } + } + return false; +} + void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) { RecordDecl::completeDefinition(); // If the class may be abstract (but hasn't been marked as such), check for // any pure final overriders. - if (mayBeAbstract()) { - CXXFinalOverriderMap MyFinalOverriders; - if (!FinalOverriders) { - getFinalOverriders(MyFinalOverriders); - FinalOverriders = &MyFinalOverriders; - } - - bool Done = false; - for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(), - MEnd = FinalOverriders->end(); - M != MEnd && !Done; ++M) { - for (OverridingMethods::iterator SO = M->second.begin(), - SOEnd = M->second.end(); - SO != SOEnd && !Done; ++SO) { - assert(SO->second.size() > 0 && - "All virtual functions have overriding virtual functions"); - - // C++ [class.abstract]p4: - // A class is abstract if it contains or inherits at least one - // pure virtual function for which the final overrider is pure - // virtual. - if (SO->second.front().Method->isPure()) { - data().Abstract = true; - Done = true; - break; - } - } - } - } + // + // C++ [class.abstract]p4: + // A class is abstract if it contains or inherits at least one + // pure virtual function for which the final overrider is pure + // virtual. + if (mayBeAbstract() && hasPureVirtualFinalOverrider(*this, FinalOverriders)) + markAbstract(); // Set access bits correctly on the directly-declared conversions. 
for (conversion_iterator I = conversion_begin(), E = conversion_end(); @@ -2059,21 +2192,21 @@ ExplicitSpecifier ExplicitSpecifier::getFromDecl(FunctionDecl *Function) { } } -CXXDeductionGuideDecl * -CXXDeductionGuideDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation StartLoc, ExplicitSpecifier ES, - const DeclarationNameInfo &NameInfo, QualType T, - TypeSourceInfo *TInfo, SourceLocation EndLocation, - CXXConstructorDecl *Ctor) { +CXXDeductionGuideDecl *CXXDeductionGuideDecl::Create( + ASTContext &C, DeclContext *DC, SourceLocation StartLoc, + ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T, + TypeSourceInfo *TInfo, SourceLocation EndLocation, CXXConstructorDecl *Ctor, + DeductionCandidate Kind) { return new (C, DC) CXXDeductionGuideDecl(C, DC, StartLoc, ES, NameInfo, T, - TInfo, EndLocation, Ctor); + TInfo, EndLocation, Ctor, Kind); } -CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { +CXXDeductionGuideDecl * +CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) CXXDeductionGuideDecl( C, nullptr, SourceLocation(), ExplicitSpecifier(), DeclarationNameInfo(), - QualType(), nullptr, SourceLocation(), nullptr); + QualType(), nullptr, SourceLocation(), nullptr, + DeductionCandidate::Normal); } RequiresExprBodyDecl *RequiresExprBodyDecl::Create( @@ -2081,8 +2214,8 @@ RequiresExprBodyDecl *RequiresExprBodyDecl::Create( return new (C, DC) RequiresExprBodyDecl(C, DC, StartLoc); } -RequiresExprBodyDecl *RequiresExprBodyDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { +RequiresExprBodyDecl * +RequiresExprBodyDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) RequiresExprBodyDecl(C, nullptr, SourceLocation()); } @@ -2156,12 +2289,9 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD, } // Other candidate final overriders might be overridden by this function. - FinalOverriders.erase( - std::remove_if(FinalOverriders.begin(), FinalOverriders.end(), - [&](CXXMethodDecl *OtherD) { - return recursivelyOverrides(D, OtherD); - }), - FinalOverriders.end()); + llvm::erase_if(FinalOverriders, [&](CXXMethodDecl *OtherD) { + return recursivelyOverrides(D, OtherD); + }); FinalOverriders.push_back(D); }; @@ -2178,25 +2308,24 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD, return FinalOverriders.size() == 1 ? 
FinalOverriders.front() : nullptr; } -CXXMethodDecl *CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD, - SourceLocation StartLoc, - const DeclarationNameInfo &NameInfo, - QualType T, TypeSourceInfo *TInfo, - StorageClass SC, bool isInline, - ConstexprSpecKind ConstexprKind, - SourceLocation EndLocation, - Expr *TrailingRequiresClause) { - return new (C, RD) - CXXMethodDecl(CXXMethod, C, RD, StartLoc, NameInfo, T, TInfo, SC, - isInline, ConstexprKind, EndLocation, - TrailingRequiresClause); +CXXMethodDecl * +CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc, + const DeclarationNameInfo &NameInfo, QualType T, + TypeSourceInfo *TInfo, StorageClass SC, bool UsesFPIntrin, + bool isInline, ConstexprSpecKind ConstexprKind, + SourceLocation EndLocation, + Expr *TrailingRequiresClause) { + return new (C, RD) CXXMethodDecl( + CXXMethod, C, RD, StartLoc, NameInfo, T, TInfo, SC, UsesFPIntrin, + isInline, ConstexprKind, EndLocation, TrailingRequiresClause); } -CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) { - return new (C, ID) - CXXMethodDecl(CXXMethod, C, nullptr, SourceLocation(), - DeclarationNameInfo(), QualType(), nullptr, SC_None, false, - ConstexprSpecKind::Unspecified, SourceLocation(), nullptr); +CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { + return new (C, ID) CXXMethodDecl( + CXXMethod, C, nullptr, SourceLocation(), DeclarationNameInfo(), + QualType(), nullptr, SC_None, false, false, + ConstexprSpecKind::Unspecified, SourceLocation(), nullptr); } CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base, @@ -2211,7 +2340,7 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base, // If the member function is marked 'final', we know that it can't be // overridden and can therefore devirtualize it unless it's pure virtual. if (hasAttr<FinalAttr>()) - return isPure() ? nullptr : this; + return isPureVirtual() ? nullptr : this; // If Base is unknown, we cannot devirtualize. if (!Base) @@ -2240,7 +2369,7 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base, // If that method is pure virtual, we can't devirtualize. If this code is // reached, the result would be UB, not a direct call to the derived class // function, and we can't assume the derived class function is defined. - if (DevirtualizedMethod->isPure()) + if (DevirtualizedMethod->isPureVirtual()) return nullptr; // If that method is marked final, we can devirtualize it. @@ -2339,7 +2468,7 @@ bool CXXMethodDecl::isUsualDeallocationFunction( // In C++17 onwards, all potential usual deallocation functions are actual // usual deallocation functions. Honor this behavior when post-C++14 // deallocation functions are offered as extensions too. - // FIXME(EricWF): Destrying Delete should be a language option. How do we + // FIXME(EricWF): Destroying Delete should be a language option. How do we // handle when destroying delete is used prior to C++17? 
if (Context.getLangOpts().CPlusPlus17 || Context.getLangOpts().AlignedAllocation || @@ -2361,6 +2490,17 @@ bool CXXMethodDecl::isUsualDeallocationFunction( return Result; } +bool CXXMethodDecl::isExplicitObjectMemberFunction() const { + // C++2b [dcl.fct]p6: + // An explicit object member function is a non-static member + // function with an explicit object parameter + return !isStatic() && hasCXXExplicitFunctionObjectParameter(); +} + +bool CXXMethodDecl::isImplicitObjectMemberFunction() const { + return !isStatic() && !hasCXXExplicitFunctionObjectParameter(); +} + bool CXXMethodDecl::isCopyAssignmentOperator() const { // C++0x [class.copy]p17: // A user-declared copy assignment operator X::operator= is a non-static @@ -2368,11 +2508,12 @@ bool CXXMethodDecl::isCopyAssignmentOperator() const { // type X, X&, const X&, volatile X& or const volatile X&. if (/*operator=*/getOverloadedOperator() != OO_Equal || /*non-static*/ isStatic() || - /*non-template*/getPrimaryTemplate() || getDescribedFunctionTemplate() || - getNumParams() != 1) + + /*non-template*/ getPrimaryTemplate() || getDescribedFunctionTemplate() || + getNumExplicitParams() != 1) return false; - QualType ParamType = getParamDecl(0)->getType(); + QualType ParamType = getNonObjectParameter(0)->getType(); if (const auto *Ref = ParamType->getAs<LValueReferenceType>()) ParamType = Ref->getPointeeType(); @@ -2389,11 +2530,11 @@ bool CXXMethodDecl::isMoveAssignmentOperator() const { // X&&, const X&&, volatile X&&, or const volatile X&&. if (getOverloadedOperator() != OO_Equal || isStatic() || getPrimaryTemplate() || getDescribedFunctionTemplate() || - getNumParams() != 1) + getNumExplicitParams() != 1) return false; - QualType ParamType = getParamDecl(0)->getType(); - if (!isa<RValueReferenceType>(ParamType)) + QualType ParamType = getNonObjectParameter(0)->getType(); + if (!ParamType->isRValueReferenceType()) return false; ParamType = ParamType->getPointeeType(); @@ -2444,13 +2585,19 @@ QualType CXXMethodDecl::getThisType(const FunctionProtoType *FPT, const CXXRecordDecl *Decl) { ASTContext &C = Decl->getASTContext(); QualType ObjectTy = ::getThisObjectType(C, FPT, Decl); - return C.getPointerType(ObjectTy); -} -QualType CXXMethodDecl::getThisObjectType(const FunctionProtoType *FPT, - const CXXRecordDecl *Decl) { - ASTContext &C = Decl->getASTContext(); - return ::getThisObjectType(C, FPT, Decl); + // Unlike 'const' and 'volatile', a '__restrict' qualifier must be + // attached to the pointer type, not the pointee. + bool Restrict = FPT->getMethodQuals().hasRestrict(); + if (Restrict) + ObjectTy.removeLocalRestrict(); + + ObjectTy = C.getLangOpts().HLSL ? C.getLValueReferenceType(ObjectTy) + : C.getPointerType(ObjectTy); + + if (Restrict) + ObjectTy.addRestrict(); + return ObjectTy; } QualType CXXMethodDecl::getThisType() const { @@ -2464,11 +2611,17 @@ QualType CXXMethodDecl::getThisType() const { getParent()); } -QualType CXXMethodDecl::getThisObjectType() const { - // Ditto getThisType. 
- assert(isInstance() && "No 'this' for static methods!"); - return CXXMethodDecl::getThisObjectType( - getType()->castAs<FunctionProtoType>(), getParent()); +QualType CXXMethodDecl::getFunctionObjectParameterReferenceType() const { + if (isExplicitObjectMemberFunction()) + return parameters()[0]->getType(); + + ASTContext &C = getParentASTContext(); + const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>(); + QualType Type = ::getThisObjectType(C, FPT, getParent()); + RefQualifierKind RK = FPT->getRefQualifier(); + if (RK == RefQualifierKind::RQ_RValue) + return C.getRValueReferenceType(Type); + return C.getLValueReferenceType(Type); } bool CXXMethodDecl::hasInlineBody() const { @@ -2549,7 +2702,7 @@ SourceLocation CXXCtorInitializer::getSourceLocation() const { return getMemberLocation(); if (const auto *TSInfo = Initializee.get<TypeSourceInfo *>()) - return TSInfo->getTypeLoc().getLocalSourceRange().getBegin(); + return TSInfo->getTypeLoc().getBeginLoc(); return {}; } @@ -2568,12 +2721,12 @@ SourceRange CXXCtorInitializer::getSourceRange() const { CXXConstructorDecl::CXXConstructorDecl( ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc, const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo, - ExplicitSpecifier ES, bool isInline, bool isImplicitlyDeclared, - ConstexprSpecKind ConstexprKind, InheritedConstructor Inherited, - Expr *TrailingRequiresClause) + ExplicitSpecifier ES, bool UsesFPIntrin, bool isInline, + bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind, + InheritedConstructor Inherited, Expr *TrailingRequiresClause) : CXXMethodDecl(CXXConstructor, C, RD, StartLoc, NameInfo, T, TInfo, - SC_None, isInline, ConstexprKind, SourceLocation(), - TrailingRequiresClause) { + SC_None, UsesFPIntrin, isInline, ConstexprKind, + SourceLocation(), TrailingRequiresClause) { setNumCtorInitializers(0); setInheritingConstructor(static_cast<bool>(Inherited)); setImplicit(isImplicitlyDeclared); @@ -2586,7 +2739,7 @@ CXXConstructorDecl::CXXConstructorDecl( void CXXConstructorDecl::anchor() {} CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C, - unsigned ID, + GlobalDeclID ID, uint64_t AllocKind) { bool hasTrailingExplicit = static_cast<bool>(AllocKind & TAKHasTailExplicit); bool isInheritingConstructor = @@ -2596,7 +2749,7 @@ CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C, isInheritingConstructor, hasTrailingExplicit); auto *Result = new (C, ID, Extra) CXXConstructorDecl( C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr, - ExplicitSpecifier(), false, false, ConstexprSpecKind::Unspecified, + ExplicitSpecifier(), false, false, false, ConstexprSpecKind::Unspecified, InheritedConstructor(), nullptr); Result->setInheritingConstructor(isInheritingConstructor); Result->CXXConstructorDeclBits.HasTrailingExplicitSpecifier = @@ -2608,19 +2761,18 @@ CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C, CXXConstructorDecl *CXXConstructorDecl::Create( ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc, const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo, - ExplicitSpecifier ES, bool isInline, bool isImplicitlyDeclared, - ConstexprSpecKind ConstexprKind, InheritedConstructor Inherited, - Expr *TrailingRequiresClause) { + ExplicitSpecifier ES, bool UsesFPIntrin, bool isInline, + bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind, + InheritedConstructor Inherited, Expr *TrailingRequiresClause) { assert(NameInfo.getName().getNameKind() == 
DeclarationName::CXXConstructorName && "Name must refer to a constructor"); unsigned Extra = additionalSizeToAlloc<InheritedConstructor, ExplicitSpecifier>( Inherited ? 1 : 0, ES.getExpr() ? 1 : 0); - return new (C, RD, Extra) - CXXConstructorDecl(C, RD, StartLoc, NameInfo, T, TInfo, ES, isInline, - isImplicitlyDeclared, ConstexprKind, Inherited, - TrailingRequiresClause); + return new (C, RD, Extra) CXXConstructorDecl( + C, RD, StartLoc, NameInfo, T, TInfo, ES, UsesFPIntrin, isInline, + isImplicitlyDeclared, ConstexprKind, Inherited, TrailingRequiresClause); } CXXConstructorDecl::init_const_iterator CXXConstructorDecl::init_begin() const { @@ -2733,25 +2885,24 @@ bool CXXConstructorDecl::isSpecializationCopyingObject() const { void CXXDestructorDecl::anchor() {} -CXXDestructorDecl * -CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +CXXDestructorDecl *CXXDestructorDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) CXXDestructorDecl( C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr, - false, false, ConstexprSpecKind::Unspecified, nullptr); + false, false, false, ConstexprSpecKind::Unspecified, nullptr); } CXXDestructorDecl *CXXDestructorDecl::Create( ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc, const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo, - bool isInline, bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind, - Expr *TrailingRequiresClause) { + bool UsesFPIntrin, bool isInline, bool isImplicitlyDeclared, + ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause) { assert(NameInfo.getName().getNameKind() == DeclarationName::CXXDestructorName && "Name must refer to a destructor"); - return new (C, RD) - CXXDestructorDecl(C, RD, StartLoc, NameInfo, T, TInfo, isInline, - isImplicitlyDeclared, ConstexprKind, - TrailingRequiresClause); + return new (C, RD) CXXDestructorDecl( + C, RD, StartLoc, NameInfo, T, TInfo, UsesFPIntrin, isInline, + isImplicitlyDeclared, ConstexprKind, TrailingRequiresClause); } void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD, Expr *ThisArg) { @@ -2766,25 +2917,26 @@ void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD, Expr *ThisArg) { void CXXConversionDecl::anchor() {} -CXXConversionDecl * -CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +CXXConversionDecl *CXXConversionDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) CXXConversionDecl( C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr, - false, ExplicitSpecifier(), ConstexprSpecKind::Unspecified, + false, false, ExplicitSpecifier(), ConstexprSpecKind::Unspecified, SourceLocation(), nullptr); } CXXConversionDecl *CXXConversionDecl::Create( ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc, const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo, - bool isInline, ExplicitSpecifier ES, ConstexprSpecKind ConstexprKind, - SourceLocation EndLocation, Expr *TrailingRequiresClause) { + bool UsesFPIntrin, bool isInline, ExplicitSpecifier ES, + ConstexprSpecKind ConstexprKind, SourceLocation EndLocation, + Expr *TrailingRequiresClause) { assert(NameInfo.getName().getNameKind() == DeclarationName::CXXConversionFunctionName && "Name must refer to a conversion function"); - return new (C, RD) - CXXConversionDecl(C, RD, StartLoc, NameInfo, T, TInfo, isInline, ES, - ConstexprKind, EndLocation, TrailingRequiresClause); + return new (C, RD) CXXConversionDecl( + C, RD, StartLoc, NameInfo, T, TInfo, 
UsesFPIntrin, isInline, ES, + ConstexprKind, EndLocation, TrailingRequiresClause); } bool CXXConversionDecl::isLambdaToBlockPointerConversion() const { @@ -2793,8 +2945,8 @@ bool CXXConversionDecl::isLambdaToBlockPointerConversion() const { } LinkageSpecDecl::LinkageSpecDecl(DeclContext *DC, SourceLocation ExternLoc, - SourceLocation LangLoc, LanguageIDs lang, - bool HasBraces) + SourceLocation LangLoc, + LinkageSpecLanguageIDs lang, bool HasBraces) : Decl(LinkageSpec, DC, LangLoc), DeclContext(LinkageSpec), ExternLoc(ExternLoc), RBraceLoc(SourceLocation()) { setLanguage(lang); @@ -2803,19 +2955,19 @@ LinkageSpecDecl::LinkageSpecDecl(DeclContext *DC, SourceLocation ExternLoc, void LinkageSpecDecl::anchor() {} -LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C, - DeclContext *DC, +LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation ExternLoc, SourceLocation LangLoc, - LanguageIDs Lang, + LinkageSpecLanguageIDs Lang, bool HasBraces) { return new (C, DC) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces); } LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { - return new (C, ID) LinkageSpecDecl(nullptr, SourceLocation(), - SourceLocation(), lang_c, false); + GlobalDeclID ID) { + return new (C, ID) + LinkageSpecDecl(nullptr, SourceLocation(), SourceLocation(), + LinkageSpecLanguageIDs::C, false); } void UsingDirectiveDecl::anchor() {} @@ -2828,13 +2980,13 @@ UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC, NamedDecl *Used, DeclContext *CommonAncestor) { if (auto *NS = dyn_cast_or_null<NamespaceDecl>(Used)) - Used = NS->getOriginalNamespace(); + Used = NS->getFirstDecl(); return new (C, DC) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc, IdentLoc, Used, CommonAncestor); } UsingDirectiveDecl *UsingDirectiveDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) UsingDirectiveDecl(nullptr, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(), @@ -2849,45 +3001,29 @@ NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() { NamespaceDecl::NamespaceDecl(ASTContext &C, DeclContext *DC, bool Inline, SourceLocation StartLoc, SourceLocation IdLoc, - IdentifierInfo *Id, NamespaceDecl *PrevDecl) + IdentifierInfo *Id, NamespaceDecl *PrevDecl, + bool Nested) : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace), - redeclarable_base(C), LocStart(StartLoc), - AnonOrFirstNamespaceAndInline(nullptr, Inline) { + redeclarable_base(C), LocStart(StartLoc) { + setInline(Inline); + setNested(Nested); setPreviousDecl(PrevDecl); - - if (PrevDecl) - AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace()); } NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC, bool Inline, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, - NamespaceDecl *PrevDecl) { - return new (C, DC) NamespaceDecl(C, DC, Inline, StartLoc, IdLoc, Id, - PrevDecl); + NamespaceDecl *PrevDecl, bool Nested) { + return new (C, DC) + NamespaceDecl(C, DC, Inline, StartLoc, IdLoc, Id, PrevDecl, Nested); } -NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) NamespaceDecl(C, nullptr, false, SourceLocation(), - SourceLocation(), nullptr, nullptr); + SourceLocation(), nullptr, nullptr, false); } -NamespaceDecl *NamespaceDecl::getOriginalNamespace() { - if (isFirstDecl()) - return this; - - return 
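// Illustration (hypothetical source; which declarations carry the flags is my
// assumption): a nested-namespace-definition whose inner NamespaceDecls would
// presumably get the new Nested bit, plus an inline namespace recorded via
// setInline(true).
namespace outer::middle::inner {
inline namespace v1 {
void api();
}
}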
AnonOrFirstNamespaceAndInline.getPointer(); -} - -const NamespaceDecl *NamespaceDecl::getOriginalNamespace() const { - if (isFirstDecl()) - return this; - - return AnonOrFirstNamespaceAndInline.getPointer(); -} - -bool NamespaceDecl::isOriginalNamespace() const { return isFirstDecl(); } - NamespaceDecl *NamespaceDecl::getNextRedeclarationImpl() { return getNextRedeclaration(); } @@ -2923,13 +3059,13 @@ NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC, NamedDecl *Namespace) { // FIXME: Preserve the aliased namespace as written. if (auto *NS = dyn_cast_or_null<NamespaceDecl>(Namespace)) - Namespace = NS->getOriginalNamespace(); + Namespace = NS->getFirstDecl(); return new (C, DC) NamespaceAliasDecl(C, DC, UsingLoc, AliasLoc, Alias, QualifierLoc, IdentLoc, Namespace); } -NamespaceAliasDecl * -NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +NamespaceAliasDecl *NamespaceAliasDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) NamespaceAliasDecl(C, nullptr, SourceLocation(), SourceLocation(), nullptr, NestedNameSpecifierLoc(), @@ -2973,8 +3109,10 @@ UsingShadowDecl::UsingShadowDecl(Kind K, ASTContext &C, DeclContext *DC, BaseUsingDecl *Introducer, NamedDecl *Target) : NamedDecl(K, DC, Loc, Name), redeclarable_base(C), UsingOrNextShadow(Introducer) { - if (Target) + if (Target) { + assert(!isa<UsingShadowDecl>(Target)); setTargetDecl(Target); + } setImplicit(); } @@ -2982,8 +3120,8 @@ UsingShadowDecl::UsingShadowDecl(Kind K, ASTContext &C, EmptyShell Empty) : NamedDecl(K, nullptr, SourceLocation(), DeclarationName()), redeclarable_base(C) {} -UsingShadowDecl * -UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +UsingShadowDecl *UsingShadowDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) UsingShadowDecl(UsingShadow, C, EmptyShell()); } @@ -3006,7 +3144,7 @@ ConstructorUsingShadowDecl::Create(ASTContext &C, DeclContext *DC, } ConstructorUsingShadowDecl * -ConstructorUsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +ConstructorUsingShadowDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) ConstructorUsingShadowDecl(C, EmptyShell()); } @@ -3017,8 +3155,7 @@ CXXRecordDecl *ConstructorUsingShadowDecl::getNominatedBaseClass() const { void BaseUsingDecl::anchor() {} void BaseUsingDecl::addShadowDecl(UsingShadowDecl *S) { - assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() && - "declaration already in set"); + assert(!llvm::is_contained(shadows(), S) && "declaration already in set"); assert(S->getIntroducer() == this); if (FirstUsingShadow.getPointer()) @@ -3027,8 +3164,7 @@ void BaseUsingDecl::addShadowDecl(UsingShadowDecl *S) { } void BaseUsingDecl::removeShadowDecl(UsingShadowDecl *S) { - assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() && - "declaration not in set"); + assert(llvm::is_contained(shadows(), S) && "declaration not in set"); assert(S->getIntroducer() == this); // Remove S from the shadow decl chain. This is O(n) but hopefully rare. 
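// Sketch of the idiom swap above (std::find over the shadow range becomes
// llvm::is_contained); the helper below is illustrative, not Clang code.
#include "llvm/ADT/STLExtras.h"
#include <vector>
static bool containsShadow(const std::vector<int> &Shadows, int S) {
  return llvm::is_contained(Shadows, S); // same as std::find(...) != end()
}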
@@ -3056,7 +3192,7 @@ UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL, return new (C, DC) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename); } -UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) UsingDecl(nullptr, SourceLocation(), NestedNameSpecifierLoc(), DeclarationNameInfo(), false); @@ -3071,18 +3207,24 @@ SourceRange UsingDecl::getSourceRange() const { void UsingEnumDecl::anchor() {} UsingEnumDecl *UsingEnumDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation UL, SourceLocation EL, - SourceLocation NL, EnumDecl *Enum) { - return new (C, DC) UsingEnumDecl(DC, Enum->getDeclName(), UL, EL, NL, Enum); + SourceLocation UL, + SourceLocation EL, + SourceLocation NL, + TypeSourceInfo *EnumType) { + assert(isa<EnumDecl>(EnumType->getType()->getAsTagDecl())); + return new (C, DC) + UsingEnumDecl(DC, EnumType->getType()->getAsTagDecl()->getDeclName(), UL, EL, NL, EnumType); } -UsingEnumDecl *UsingEnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) { - return new (C, ID) UsingEnumDecl(nullptr, DeclarationName(), SourceLocation(), - SourceLocation(), SourceLocation(), nullptr); +UsingEnumDecl *UsingEnumDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { + return new (C, ID) + UsingEnumDecl(nullptr, DeclarationName(), SourceLocation(), + SourceLocation(), SourceLocation(), nullptr); } SourceRange UsingEnumDecl::getSourceRange() const { - return SourceRange(EnumLocation, getLocation()); + return SourceRange(UsingLocation, EnumType->getTypeLoc().getEndLoc()); } void UsingPackDecl::anchor() {} @@ -3094,10 +3236,11 @@ UsingPackDecl *UsingPackDecl::Create(ASTContext &C, DeclContext *DC, return new (C, DC, Extra) UsingPackDecl(DC, InstantiatedFrom, UsingDecls); } -UsingPackDecl *UsingPackDecl::CreateDeserialized(ASTContext &C, unsigned ID, +UsingPackDecl *UsingPackDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, unsigned NumExpansions) { size_t Extra = additionalSizeToAlloc<NamedDecl *>(NumExpansions); - auto *Result = new (C, ID, Extra) UsingPackDecl(nullptr, nullptr, None); + auto *Result = + new (C, ID, Extra) UsingPackDecl(nullptr, nullptr, std::nullopt); Result->NumExpansions = NumExpansions; auto *Trail = Result->getTrailingObjects<NamedDecl *>(); for (unsigned I = 0; I != NumExpansions; ++I) @@ -3119,7 +3262,7 @@ UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC, } UnresolvedUsingValueDecl * -UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) UnresolvedUsingValueDecl(nullptr, QualType(), SourceLocation(), NestedNameSpecifierLoc(), @@ -3149,7 +3292,8 @@ UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC, } UnresolvedUsingTypenameDecl * -UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) UnresolvedUsingTypenameDecl( nullptr, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(), SourceLocation(), nullptr, SourceLocation()); @@ -3162,7 +3306,8 @@ UnresolvedUsingIfExistsDecl::Create(ASTContext &Ctx, DeclContext *DC, } UnresolvedUsingIfExistsDecl * -UnresolvedUsingIfExistsDecl::CreateDeserialized(ASTContext &Ctx, unsigned ID) { +UnresolvedUsingIfExistsDecl::CreateDeserialized(ASTContext &Ctx, + GlobalDeclID ID) { return new (Ctx, ID) 
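// Illustration (hypothetical C++20 source) of the construct UsingEnumDecl
// models; keeping a TypeSourceInfo preserves how the enum type was written,
// and the source range can end at the end of that type.
enum class Color { Red, Green };
void paint() {
  using enum Color; // range now runs from 'using' to the end of 'Color'
  Color c = Red;
  (void)c;
}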
UnresolvedUsingIfExistsDecl(nullptr, SourceLocation(), DeclarationName()); } @@ -3178,8 +3323,7 @@ void StaticAssertDecl::anchor() {} StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StaticAssertLoc, - Expr *AssertExpr, - StringLiteral *Message, + Expr *AssertExpr, Expr *Message, SourceLocation RParenLoc, bool Failed) { return new (C, DC) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message, @@ -3187,11 +3331,21 @@ StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC, } StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) StaticAssertDecl(nullptr, SourceLocation(), nullptr, nullptr, SourceLocation(), false); } +VarDecl *ValueDecl::getPotentiallyDecomposedVarDecl() { + assert((isa<VarDecl, BindingDecl>(this)) && + "expected a VarDecl or a BindingDecl"); + if (auto *Var = llvm::dyn_cast<VarDecl>(this)) + return Var; + if (auto *BD = llvm::dyn_cast<BindingDecl>(this)) + return llvm::dyn_cast<VarDecl>(BD->getDecomposedDecl()); + return nullptr; +} + void BindingDecl::anchor() {} BindingDecl *BindingDecl::Create(ASTContext &C, DeclContext *DC, @@ -3199,7 +3353,7 @@ BindingDecl *BindingDecl::Create(ASTContext &C, DeclContext *DC, return new (C, DC) BindingDecl(DC, IdLoc, Id); } -BindingDecl *BindingDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +BindingDecl *BindingDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) BindingDecl(nullptr, SourceLocation(), nullptr); } @@ -3230,12 +3384,12 @@ DecompositionDecl *DecompositionDecl::Create(ASTContext &C, DeclContext *DC, } DecompositionDecl *DecompositionDecl::CreateDeserialized(ASTContext &C, - unsigned ID, + GlobalDeclID ID, unsigned NumBindings) { size_t Extra = additionalSizeToAlloc<BindingDecl *>(NumBindings); auto *Result = new (C, ID, Extra) DecompositionDecl(C, nullptr, SourceLocation(), SourceLocation(), - QualType(), nullptr, StorageClass(), None); + QualType(), nullptr, StorageClass(), std::nullopt); // Set up and clean out the bindings array. 
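// Hedged example of why Message is now an Expr rather than a StringLiteral:
// P2741 user-generated static_assert messages accept any constant expression
// providing data()/size() (requires a compiler implementing that proposal;
// illustrative only).
#include <cstddef>
struct Msg {
  constexpr std::size_t size() const { return 2; }
  constexpr const char *data() const { return "!!"; }
};
static_assert(sizeof(int) >= 2, Msg{});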
Result->NumBindings = NumBindings; auto *Trail = Result->getTrailingObjects<BindingDecl *>(); @@ -3244,16 +3398,17 @@ DecompositionDecl *DecompositionDecl::CreateDeserialized(ASTContext &C, return Result; } -void DecompositionDecl::printName(llvm::raw_ostream &os) const { - os << '['; +void DecompositionDecl::printName(llvm::raw_ostream &OS, + const PrintingPolicy &Policy) const { + OS << '['; bool Comma = false; for (const auto *B : bindings()) { if (Comma) - os << ", "; - B->printName(os); + OS << ", "; + B->printName(OS, Policy); Comma = true; } - os << ']'; + OS << ']'; } void MSPropertyDecl::anchor() {} @@ -3268,7 +3423,7 @@ MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC, } MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) MSPropertyDecl(nullptr, SourceLocation(), DeclarationName(), QualType(), nullptr, SourceLocation(), nullptr, nullptr); @@ -3278,18 +3433,19 @@ void MSGuidDecl::anchor() {} MSGuidDecl::MSGuidDecl(DeclContext *DC, QualType T, Parts P) : ValueDecl(Decl::MSGuid, DC, SourceLocation(), DeclarationName(), T), - PartVal(P), APVal() {} + PartVal(P) {} MSGuidDecl *MSGuidDecl::Create(const ASTContext &C, QualType T, Parts P) { DeclContext *DC = C.getTranslationUnitDecl(); return new (C, DC) MSGuidDecl(DC, T, P); } -MSGuidDecl *MSGuidDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +MSGuidDecl *MSGuidDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) MSGuidDecl(nullptr, QualType(), Parts()); } -void MSGuidDecl::printName(llvm::raw_ostream &OS) const { +void MSGuidDecl::printName(llvm::raw_ostream &OS, + const PrintingPolicy &) const { OS << llvm::format("GUID{%08" PRIx32 "-%04" PRIx16 "-%04" PRIx16 "-", PartVal.Part1, PartVal.Part2, PartVal.Part3); unsigned I = 0; @@ -3334,7 +3490,8 @@ static bool isValidStructGUID(ASTContext &Ctx, QualType T) { return false; auto MatcherIt = Fields.begin(); for (const FieldDecl *FD : RD->fields()) { - if (FD->isUnnamedBitfield()) continue; + if (FD->isUnnamedBitField()) + continue; if (FD->isBitField() || MatcherIt == Fields.end() || !(*MatcherIt)(FD->getType())) return false; @@ -3370,6 +3527,39 @@ APValue &MSGuidDecl::getAsAPValue() const { return APVal; } +void UnnamedGlobalConstantDecl::anchor() {} + +UnnamedGlobalConstantDecl::UnnamedGlobalConstantDecl(const ASTContext &C, + DeclContext *DC, + QualType Ty, + const APValue &Val) + : ValueDecl(Decl::UnnamedGlobalConstant, DC, SourceLocation(), + DeclarationName(), Ty), + Value(Val) { + // Cleanup the embedded APValue if required (note that our destructor is never + // run) + if (Value.needsCleanup()) + C.addDestruction(&Value); +} + +UnnamedGlobalConstantDecl * +UnnamedGlobalConstantDecl::Create(const ASTContext &C, QualType T, + const APValue &Value) { + DeclContext *DC = C.getTranslationUnitDecl(); + return new (C, DC) UnnamedGlobalConstantDecl(C, DC, T, Value); +} + +UnnamedGlobalConstantDecl * +UnnamedGlobalConstantDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { + return new (C, ID) + UnnamedGlobalConstantDecl(C, nullptr, QualType(), APValue()); +} + +void UnnamedGlobalConstantDecl::printName(llvm::raw_ostream &OS, + const PrintingPolicy &) const { + OS << "unnamed-global-constant"; +} + static const char *getAccessName(AccessSpecifier AS) { switch (AS) { case AS_none: diff --git a/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp b/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp index 8ec1dea84df5..04b9b93699f3 100644 --- 
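// Illustration of the declaration whose name the updated
// DecompositionDecl::printName renders as "[x, y]", delegating to each
// BindingDecl's printName(OS, Policy).
#include <utility>
void demo() {
  auto [x, y] = std::pair{1, 2};
  (void)x;
  (void)y;
}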
a/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclFriend.cpp @@ -62,7 +62,7 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC, return FD; } -FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID, +FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, unsigned FriendTypeNumTPLists) { std::size_t Extra = additionalSizeToAlloc<TemplateParameterList *>(FriendTypeNumTPLists); diff --git a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp index 6e790f03b027..83062b0e6887 100644 --- a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp @@ -16,6 +16,7 @@ #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclBase.h" +#include "clang/AST/ODRHash.h" #include "clang/AST/Stmt.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" @@ -23,7 +24,6 @@ #include "clang/Basic/LLVM.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/SourceLocation.h" -#include "llvm/ADT/None.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" @@ -66,7 +66,8 @@ void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts, //===----------------------------------------------------------------------===// ObjCContainerDecl::ObjCContainerDecl(Kind DK, DeclContext *DC, - IdentifierInfo *Id, SourceLocation nameLoc, + const IdentifierInfo *Id, + SourceLocation nameLoc, SourceLocation atStartLoc) : NamedDecl(DK, DC, nameLoc, Id), DeclContext(DK) { setAtStartLoc(atStartLoc); @@ -232,6 +233,18 @@ ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const { return &Ctx.Idents.get(ivarName.str()); } +ObjCPropertyDecl *ObjCContainerDecl::getProperty(const IdentifierInfo *Id, + bool IsInstance) const { + for (auto *LookupResult : lookup(Id)) { + if (auto *Prop = dyn_cast<ObjCPropertyDecl>(LookupResult)) { + if (Prop->isInstanceProperty() == IsInstance) { + return Prop; + } + } + } + return nullptr; +} + /// FindPropertyDeclaration - Finds declaration of the property given its name /// in 'PropertyId' and returns it. It returns 0, if not found. ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration( @@ -366,10 +379,8 @@ SourceLocation ObjCInterfaceDecl::getSuperClassLoc() const { /// FindPropertyVisibleInPrimaryClass - Finds declaration of the property /// with name 'PropertyId' in the primary class; including those in protocols /// (direct or indirect) used by the primary class. -ObjCPropertyDecl * -ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass( - IdentifierInfo *PropertyId, - ObjCPropertyQueryKind QueryKind) const { +ObjCPropertyDecl *ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass( + const IdentifierInfo *PropertyId, ObjCPropertyQueryKind QueryKind) const { // FIXME: Should make sure no callers ever do this. 
if (!hasDefinition()) return nullptr; @@ -391,21 +402,18 @@ ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass( return nullptr; } -void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM, - PropertyDeclOrder &PO) const { +void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM) const { for (auto *Prop : properties()) { PM[std::make_pair(Prop->getIdentifier(), Prop->isClassProperty())] = Prop; - PO.push_back(Prop); } for (const auto *Ext : known_extensions()) { const ObjCCategoryDecl *ClassExt = Ext; for (auto *Prop : ClassExt->properties()) { PM[std::make_pair(Prop->getIdentifier(), Prop->isClassProperty())] = Prop; - PO.push_back(Prop); } } for (const auto *PI : all_referenced_protocols()) - PI->collectPropertiesToImplement(PM, PO); + PI->collectPropertiesToImplement(PM); // Note, the properties declared only in class extensions are still copied // into the main @interface's property list, and therefore we don't // explicitly, have to search class extension properties. @@ -603,10 +611,6 @@ void ObjCInterfaceDecl::allocateDefinitionData() { assert(!hasDefinition() && "ObjC class already has a definition"); Data.setPointer(new (getASTContext()) DefinitionData()); Data.getPointer()->Definition = this; - - // Make the type point at the definition, now that we have one. - if (TypeForDecl) - cast<ObjCInterfaceType>(TypeForDecl)->Decl = this; } void ObjCInterfaceDecl::startDefinition() { @@ -619,6 +623,17 @@ void ObjCInterfaceDecl::startDefinition() { } } +void ObjCInterfaceDecl::startDuplicateDefinitionForComparison() { + Data.setPointer(nullptr); + allocateDefinitionData(); + // Don't propagate data to other redeclarations. +} + +void ObjCInterfaceDecl::mergeDuplicateDefinitionWithCommon( + const ObjCInterfaceDecl *Definition) { + Data = Definition->Data; +} + ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID, ObjCInterfaceDecl *&clsDeclared) { // FIXME: Should make sure no callers ever do this. @@ -773,6 +788,33 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod( return Method; } +unsigned ObjCInterfaceDecl::getODRHash() { + assert(hasDefinition() && "ODRHash only for records with definitions"); + + // Previously calculated hash is stored in DefinitionData. + if (hasODRHash()) + return data().ODRHash; + + // Only calculate hash on first call of getODRHash per record. 
+ ODRHash Hasher; + Hasher.AddObjCInterfaceDecl(getDefinition()); + data().ODRHash = Hasher.CalculateHash(); + setHasODRHash(true); + + return data().ODRHash; +} + +bool ObjCInterfaceDecl::hasODRHash() const { + if (!hasDefinition()) + return false; + return data().HasODRHash; +} + +void ObjCInterfaceDecl::setHasODRHash(bool HasHash) { + assert(hasDefinition() && "Cannot set ODRHash without definition"); + data().HasODRHash = HasHash; +} + //===----------------------------------------------------------------------===// // ObjCMethodDecl //===----------------------------------------------------------------------===// @@ -782,7 +824,7 @@ ObjCMethodDecl::ObjCMethodDecl( QualType T, TypeSourceInfo *ReturnTInfo, DeclContext *contextDecl, bool isInstance, bool isVariadic, bool isPropertyAccessor, bool isSynthesizedAccessorStub, bool isImplicitlyDeclared, bool isDefined, - ImplementationControl impControl, bool HasRelatedResultType) + ObjCImplementationControl impControl, bool HasRelatedResultType) : NamedDecl(ObjCMethod, contextDecl, beginLoc, SelInfo), DeclContext(ObjCMethod), MethodDeclType(T), ReturnTInfo(ReturnTInfo), DeclEndLoc(endLoc) { @@ -812,15 +854,16 @@ ObjCMethodDecl *ObjCMethodDecl::Create( Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo, DeclContext *contextDecl, bool isInstance, bool isVariadic, bool isPropertyAccessor, bool isSynthesizedAccessorStub, - bool isImplicitlyDeclared, bool isDefined, ImplementationControl impControl, - bool HasRelatedResultType) { + bool isImplicitlyDeclared, bool isDefined, + ObjCImplementationControl impControl, bool HasRelatedResultType) { return new (C, contextDecl) ObjCMethodDecl( beginLoc, endLoc, SelInfo, T, ReturnTInfo, contextDecl, isInstance, isVariadic, isPropertyAccessor, isSynthesizedAccessorStub, isImplicitlyDeclared, isDefined, impControl, HasRelatedResultType); } -ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) ObjCMethodDecl(SourceLocation(), SourceLocation(), Selector(), QualType(), nullptr, nullptr); } @@ -855,6 +898,14 @@ bool ObjCMethodDecl::isDesignatedInitializerForTheInterface( return false; } +bool ObjCMethodDecl::hasParamDestroyedInCallee() const { + for (auto *param : parameters()) { + if (param->isDestroyedInCallee()) + return true; + } + return false; +} + Stmt *ObjCMethodDecl::getBody() const { return Body.get(getASTContext().getExternalSource()); } @@ -880,8 +931,8 @@ void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C, unsigned Size = sizeof(ParmVarDecl *) * NumParams + sizeof(SourceLocation) * SelLocs.size(); ParamsAndSelLocs = C.Allocate(Size); - std::copy(Params.begin(), Params.end(), getParams()); - std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs()); + std::uninitialized_copy(Params.begin(), Params.end(), getParams()); + std::uninitialized_copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs()); } void ObjCMethodDecl::getSelectorLocs( @@ -896,12 +947,12 @@ void ObjCMethodDecl::setMethodParams(ASTContext &C, assert((!SelLocs.empty() || isImplicit()) && "No selector locs for non-implicit method"); if (isImplicit()) - return setParamsAndSelLocs(C, Params, llvm::None); + return setParamsAndSelLocs(C, Params, std::nullopt); setSelLocsKind(hasStandardSelectorLocs(getSelector(), SelLocs, Params, DeclEndLoc)); if (getSelLocsKind() != SelLoc_NonStandard) - return setParamsAndSelLocs(C, Params, llvm::None); + return setParamsAndSelLocs(C, Params, std::nullopt); 
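// Sketch of the likely rationale for std::uninitialized_copy above (my
// reading, not stated in the diff): C.Allocate() returns raw storage, so the
// copy must construct elements in place rather than assign to objects that do
// not exist yet. Illustrative helper, not Clang code.
#include <memory>
#include <vector>
static void fillRaw(void *Storage, const std::vector<int> &Src) {
  auto *Dst = static_cast<int *>(Storage);
  std::uninitialized_copy(Src.begin(), Src.end(), Dst);
}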
setParamsAndSelLocs(C, Params, SelLocs); } @@ -1143,7 +1194,7 @@ void ObjCMethodDecl::createImplicitParams(ASTContext &Context, getSelfType(Context, OID, selfIsPseudoStrong, selfIsConsumed); auto *Self = ImplicitParamDecl::Create(Context, this, SourceLocation(), &Context.Idents.get("self"), selfTy, - ImplicitParamDecl::ObjCSelf); + ImplicitParamKind::ObjCSelf); setSelfDecl(Self); if (selfIsConsumed) @@ -1154,7 +1205,7 @@ void ObjCMethodDecl::createImplicitParams(ASTContext &Context, setCmdDecl(ImplicitParamDecl::Create( Context, this, SourceLocation(), &Context.Idents.get("_cmd"), - Context.getObjCSelType(), ImplicitParamDecl::ObjCCmd)); + Context.getObjCSelType(), ImplicitParamKind::ObjCCmd)); } ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() { @@ -1436,7 +1487,7 @@ ObjCTypeParamDecl *ObjCTypeParamDecl::Create(ASTContext &ctx, DeclContext *dc, } ObjCTypeParamDecl *ObjCTypeParamDecl::CreateDeserialized(ASTContext &ctx, - unsigned ID) { + GlobalDeclID ID) { return new (ctx, ID) ObjCTypeParamDecl(ctx, nullptr, ObjCTypeParamVariance::Invariant, SourceLocation(), 0, SourceLocation(), @@ -1480,7 +1531,7 @@ ObjCTypeParamList *ObjCTypeParamList::create( void ObjCTypeParamList::gatherDefaultTypeArgs( SmallVectorImpl<QualType> &typeArgs) const { typeArgs.reserve(size()); - for (auto typeParam : *this) + for (auto *typeParam : *this) typeArgs.push_back(typeParam->getUnderlyingType()); } @@ -1488,14 +1539,10 @@ void ObjCTypeParamList::gatherDefaultTypeArgs( // ObjCInterfaceDecl //===----------------------------------------------------------------------===// -ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C, - DeclContext *DC, - SourceLocation atLoc, - IdentifierInfo *Id, - ObjCTypeParamList *typeParamList, - ObjCInterfaceDecl *PrevDecl, - SourceLocation ClassLoc, - bool isInternal){ +ObjCInterfaceDecl *ObjCInterfaceDecl::Create( + const ASTContext &C, DeclContext *DC, SourceLocation atLoc, + const IdentifierInfo *Id, ObjCTypeParamList *typeParamList, + ObjCInterfaceDecl *PrevDecl, SourceLocation ClassLoc, bool isInternal) { auto *Result = new (C, DC) ObjCInterfaceDecl(C, DC, atLoc, Id, typeParamList, ClassLoc, PrevDecl, isInternal); @@ -1505,7 +1552,7 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C, } ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { auto *Result = new (C, ID) ObjCInterfaceDecl(C, nullptr, SourceLocation(), nullptr, nullptr, SourceLocation(), nullptr, false); @@ -1513,12 +1560,10 @@ ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C, return Result; } -ObjCInterfaceDecl::ObjCInterfaceDecl(const ASTContext &C, DeclContext *DC, - SourceLocation AtLoc, IdentifierInfo *Id, - ObjCTypeParamList *typeParamList, - SourceLocation CLoc, - ObjCInterfaceDecl *PrevDecl, - bool IsInternal) +ObjCInterfaceDecl::ObjCInterfaceDecl( + const ASTContext &C, DeclContext *DC, SourceLocation AtLoc, + const IdentifierInfo *Id, ObjCTypeParamList *typeParamList, + SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl, bool IsInternal) : ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, AtLoc), redeclarable_base(C) { setPreviousDecl(PrevDecl); @@ -1631,6 +1676,11 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() { ObjCIvarDecl *curIvar = nullptr; if (!data().IvarList) { + // Force ivar deserialization upfront, before building IvarList. 
+ (void)ivar_empty(); + for (const auto *Ext : known_extensions()) { + (void)Ext->ivar_empty(); + } if (!ivar_empty()) { ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end(); data().IvarList = *I; ++I; @@ -1695,8 +1745,8 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() { /// categories for this class and returns it. Name of the category is passed /// in 'CategoryId'. If category not found, return 0; /// -ObjCCategoryDecl * -ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const { +ObjCCategoryDecl *ObjCInterfaceDecl::FindCategoryDeclaration( + const IdentifierInfo *CategoryId) const { // FIXME: Should make sure no callers ever do this. if (!hasDefinition()) return nullptr; @@ -1782,10 +1832,10 @@ void ObjCIvarDecl::anchor() {} ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC, SourceLocation StartLoc, - SourceLocation IdLoc, IdentifierInfo *Id, - QualType T, TypeSourceInfo *TInfo, - AccessControl ac, Expr *BW, - bool synthesized) { + SourceLocation IdLoc, + const IdentifierInfo *Id, QualType T, + TypeSourceInfo *TInfo, AccessControl ac, + Expr *BW, bool synthesized) { if (DC) { // Ivar's can only appear in interfaces, implementations (via synthesized // properties), and class extensions (via direct declaration, or synthesized @@ -1816,14 +1866,14 @@ ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC, synthesized); } -ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) ObjCIvarDecl(nullptr, SourceLocation(), SourceLocation(), nullptr, QualType(), nullptr, ObjCIvarDecl::None, nullptr, false); } -const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const { - const auto *DC = cast<ObjCContainerDecl>(getDeclContext()); +ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() { + auto *DC = cast<ObjCContainerDecl>(getDeclContext()); switch (DC->getKind()) { default: @@ -1833,7 +1883,7 @@ const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const { // Ivars can only appear in class extension categories. 
case ObjCCategory: { - const auto *CD = cast<ObjCCategoryDecl>(DC); + auto *CD = cast<ObjCCategoryDecl>(DC); assert(CD->IsClassExtension() && "invalid container for ivar!"); return CD->getClassInterface(); } @@ -1865,7 +1915,7 @@ ObjCAtDefsFieldDecl } ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) ObjCAtDefsFieldDecl(nullptr, SourceLocation(), SourceLocation(), nullptr, QualType(), nullptr); @@ -1900,7 +1950,7 @@ ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC, } ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { ObjCProtocolDecl *Result = new (C, ID) ObjCProtocolDecl(C, nullptr, nullptr, SourceLocation(), SourceLocation(), nullptr); @@ -1967,6 +2017,7 @@ void ObjCProtocolDecl::allocateDefinitionData() { assert(!Data.getPointer() && "Protocol already has a definition!"); Data.setPointer(new (getASTContext()) DefinitionData); Data.getPointer()->Definition = this; + Data.getPointer()->HasODRHash = false; } void ObjCProtocolDecl::startDefinition() { @@ -1977,19 +2028,28 @@ void ObjCProtocolDecl::startDefinition() { RD->Data = this->Data; } -void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM, - PropertyDeclOrder &PO) const { +void ObjCProtocolDecl::startDuplicateDefinitionForComparison() { + Data.setPointer(nullptr); + allocateDefinitionData(); + // Don't propagate data to other redeclarations. +} + +void ObjCProtocolDecl::mergeDuplicateDefinitionWithCommon( + const ObjCProtocolDecl *Definition) { + Data = Definition->Data; +} + +void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM) const { if (const ObjCProtocolDecl *PDecl = getDefinition()) { for (auto *Prop : PDecl->properties()) { // Insert into PM if not there already. PM.insert(std::make_pair( std::make_pair(Prop->getIdentifier(), Prop->isClassProperty()), Prop)); - PO.push_back(Prop); } // Scan through protocol's protocols. for (const auto *PI : PDecl->protocols()) - PI->collectPropertiesToImplement(PM, PO); + PI->collectPropertiesToImplement(PM); } } @@ -2021,34 +2081,56 @@ ObjCProtocolDecl::getObjCRuntimeNameAsString() const { return getName(); } +unsigned ObjCProtocolDecl::getODRHash() { + assert(hasDefinition() && "ODRHash only for records with definitions"); + + // Previously calculated hash is stored in DefinitionData. + if (hasODRHash()) + return data().ODRHash; + + // Only calculate hash on first call of getODRHash per record. 
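// Minimal sketch of the compute-once pattern getODRHash() follows; the names
// below are illustrative, not the Clang API.
struct DefinitionState {
  unsigned ODRHash = 0;
  bool HasODRHash = false;
};
static unsigned getOrComputeHash(DefinitionState &D, unsigned (*Compute)()) {
  if (D.HasODRHash)
    return D.ODRHash; // cached after the first call
  D.ODRHash = Compute();
  D.HasODRHash = true;
  return D.ODRHash;
}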
+ ODRHash Hasher; + Hasher.AddObjCProtocolDecl(getDefinition()); + data().ODRHash = Hasher.CalculateHash(); + setHasODRHash(true); + + return data().ODRHash; +} + +bool ObjCProtocolDecl::hasODRHash() const { + if (!hasDefinition()) + return false; + return data().HasODRHash; +} + +void ObjCProtocolDecl::setHasODRHash(bool HasHash) { + assert(hasDefinition() && "Cannot set ODRHash without definition"); + data().HasODRHash = HasHash; +} + //===----------------------------------------------------------------------===// // ObjCCategoryDecl //===----------------------------------------------------------------------===// void ObjCCategoryDecl::anchor() {} -ObjCCategoryDecl::ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc, - SourceLocation ClassNameLoc, - SourceLocation CategoryNameLoc, - IdentifierInfo *Id, ObjCInterfaceDecl *IDecl, - ObjCTypeParamList *typeParamList, - SourceLocation IvarLBraceLoc, - SourceLocation IvarRBraceLoc) +ObjCCategoryDecl::ObjCCategoryDecl( + DeclContext *DC, SourceLocation AtLoc, SourceLocation ClassNameLoc, + SourceLocation CategoryNameLoc, const IdentifierInfo *Id, + ObjCInterfaceDecl *IDecl, ObjCTypeParamList *typeParamList, + SourceLocation IvarLBraceLoc, SourceLocation IvarRBraceLoc) : ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc), ClassInterface(IDecl), CategoryNameLoc(CategoryNameLoc), IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc) { setTypeParamList(typeParamList); } -ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation AtLoc, - SourceLocation ClassNameLoc, - SourceLocation CategoryNameLoc, - IdentifierInfo *Id, - ObjCInterfaceDecl *IDecl, - ObjCTypeParamList *typeParamList, - SourceLocation IvarLBraceLoc, - SourceLocation IvarRBraceLoc) { +ObjCCategoryDecl *ObjCCategoryDecl::Create( + ASTContext &C, DeclContext *DC, SourceLocation AtLoc, + SourceLocation ClassNameLoc, SourceLocation CategoryNameLoc, + const IdentifierInfo *Id, ObjCInterfaceDecl *IDecl, + ObjCTypeParamList *typeParamList, SourceLocation IvarLBraceLoc, + SourceLocation IvarRBraceLoc) { auto *CatDecl = new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id, IDecl, typeParamList, IvarLBraceLoc, @@ -2067,7 +2149,7 @@ ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC, } ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) ObjCCategoryDecl(nullptr, SourceLocation(), SourceLocation(), SourceLocation(), nullptr, nullptr, nullptr); @@ -2097,21 +2179,18 @@ void ObjCCategoryDecl::setTypeParamList(ObjCTypeParamList *TPL) { void ObjCCategoryImplDecl::anchor() {} -ObjCCategoryImplDecl * -ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC, - IdentifierInfo *Id, - ObjCInterfaceDecl *ClassInterface, - SourceLocation nameLoc, - SourceLocation atStartLoc, - SourceLocation CategoryNameLoc) { +ObjCCategoryImplDecl *ObjCCategoryImplDecl::Create( + ASTContext &C, DeclContext *DC, const IdentifierInfo *Id, + ObjCInterfaceDecl *ClassInterface, SourceLocation nameLoc, + SourceLocation atStartLoc, SourceLocation CategoryNameLoc) { if (ClassInterface && ClassInterface->hasDefinition()) ClassInterface = ClassInterface->getDefinition(); return new (C, DC) ObjCCategoryImplDecl(DC, Id, ClassInterface, nameLoc, atStartLoc, CategoryNameLoc); } -ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { +ObjCCategoryImplDecl * +ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C, GlobalDeclID 
ID) { return new (C, ID) ObjCCategoryImplDecl(nullptr, nullptr, nullptr, SourceLocation(), SourceLocation(), SourceLocation()); @@ -2218,7 +2297,7 @@ ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC, } ObjCImplementationDecl * -ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +ObjCImplementationDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) ObjCImplementationDecl(nullptr, nullptr, nullptr, SourceLocation(), SourceLocation()); } @@ -2261,7 +2340,7 @@ ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC, } ObjCCompatibleAliasDecl * -ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) ObjCCompatibleAliasDecl(nullptr, SourceLocation(), nullptr, nullptr); } @@ -2272,20 +2351,17 @@ ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) { void ObjCPropertyDecl::anchor() {} -ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC, - SourceLocation L, - IdentifierInfo *Id, - SourceLocation AtLoc, - SourceLocation LParenLoc, - QualType T, - TypeSourceInfo *TSI, - PropertyControl propControl) { +ObjCPropertyDecl * +ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, + const IdentifierInfo *Id, SourceLocation AtLoc, + SourceLocation LParenLoc, QualType T, + TypeSourceInfo *TSI, PropertyControl propControl) { return new (C, DC) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T, TSI, propControl); } ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) ObjCPropertyDecl(nullptr, SourceLocation(), nullptr, SourceLocation(), SourceLocation(), QualType(), nullptr, None); @@ -2317,8 +2393,8 @@ ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C, ivarLoc); } -ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { +ObjCPropertyImplDecl * +ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) ObjCPropertyImplDecl(nullptr, SourceLocation(), SourceLocation(), nullptr, Dynamic, nullptr, SourceLocation()); diff --git a/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp index 867ef31656f7..81ca48e60942 100644 --- a/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp @@ -30,13 +30,13 @@ OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C, SourceLocation L, ArrayRef<Expr *> VL) { auto *D = OMPDeclarativeDirective::createDirective<OMPThreadPrivateDecl>( - C, DC, llvm::None, VL.size(), L); + C, DC, std::nullopt, VL.size(), L); D->setVars(VL); return D; } OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C, - unsigned ID, + GlobalDeclID ID, unsigned N) { return OMPDeclarativeDirective::createEmptyDirective<OMPThreadPrivateDecl>( C, ID, 0, N); @@ -63,7 +63,8 @@ OMPAllocateDecl *OMPAllocateDecl::Create(ASTContext &C, DeclContext *DC, return D; } -OMPAllocateDecl *OMPAllocateDecl::CreateDeserialized(ASTContext &C, unsigned ID, +OMPAllocateDecl *OMPAllocateDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID, unsigned NVars, unsigned NClauses) { return OMPDeclarativeDirective::createEmptyDirective<OMPAllocateDecl>( @@ -89,7 +90,8 @@ OMPRequiresDecl *OMPRequiresDecl::Create(ASTContext &C, DeclContext *DC, L); } -OMPRequiresDecl 
*OMPRequiresDecl::CreateDeserialized(ASTContext &C, unsigned ID, +OMPRequiresDecl *OMPRequiresDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID, unsigned N) { return OMPDeclarativeDirective::createEmptyDirective<OMPRequiresDecl>( C, ID, N, 0, SourceLocation()); @@ -104,7 +106,7 @@ OMPDeclareReductionDecl::OMPDeclareReductionDecl( QualType Ty, OMPDeclareReductionDecl *PrevDeclInScope) : ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), Combiner(nullptr), PrevDeclInScope(PrevDeclInScope) { - setInitializer(nullptr, CallInit); + setInitializer(nullptr, OMPDeclareReductionInitKind::Call); } void OMPDeclareReductionDecl::anchor() {} @@ -117,7 +119,7 @@ OMPDeclareReductionDecl *OMPDeclareReductionDecl::Create( } OMPDeclareReductionDecl * -OMPDeclareReductionDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +OMPDeclareReductionDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) OMPDeclareReductionDecl( OMPDeclareReduction, /*DC=*/nullptr, SourceLocation(), DeclarationName(), QualType(), /*PrevDeclInScope=*/nullptr); @@ -148,7 +150,7 @@ OMPDeclareMapperDecl *OMPDeclareMapperDecl::Create( } OMPDeclareMapperDecl *OMPDeclareMapperDecl::CreateDeserialized(ASTContext &C, - unsigned ID, + GlobalDeclID ID, unsigned N) { return OMPDeclarativeDirective::createEmptyDirective<OMPDeclareMapperDecl>( C, ID, N, 1, SourceLocation(), DeclarationName(), QualType(), @@ -179,7 +181,7 @@ OMPCapturedExprDecl *OMPCapturedExprDecl::Create(ASTContext &C, DeclContext *DC, } OMPCapturedExprDecl *OMPCapturedExprDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) OMPCapturedExprDecl(C, nullptr, nullptr, QualType(), /*TInfo=*/nullptr, SourceLocation()); } diff --git a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp index 4dcf3d0e6ab1..26773a69ab9a 100644 --- a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp @@ -21,6 +21,7 @@ #include "clang/AST/ExprCXX.h" #include "clang/AST/PrettyPrinter.h" #include "clang/Basic/Module.h" +#include "clang/Basic/SourceManager.h" #include "llvm/Support/raw_ostream.h" using namespace clang; @@ -72,6 +73,7 @@ namespace { void VisitLabelDecl(LabelDecl *D); void VisitParmVarDecl(ParmVarDecl *D); void VisitFileScopeAsmDecl(FileScopeAsmDecl *D); + void VisitTopLevelStmtDecl(TopLevelStmtDecl *D); void VisitImportDecl(ImportDecl *D); void VisitStaticAssertDecl(StaticAssertDecl *D); void VisitNamespaceDecl(NamespaceDecl *D); @@ -108,16 +110,18 @@ namespace { void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D); void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP); void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *NTTP); + void VisitHLSLBufferDecl(HLSLBufferDecl *D); void printTemplateParameters(const TemplateParameterList *Params, bool OmitTemplateKW = false); void printTemplateArguments(llvm::ArrayRef<TemplateArgument> Args, - const TemplateParameterList *Params, - bool TemplOverloaded); + const TemplateParameterList *Params); void printTemplateArguments(llvm::ArrayRef<TemplateArgumentLoc> Args, - const TemplateParameterList *Params, - bool TemplOverloaded); - void prettyPrintAttributes(Decl *D); + const TemplateParameterList *Params); + enum class AttrPosAsWritten { Default = 0, Left, Right }; + bool + prettyPrintAttributes(const Decl *D, + AttrPosAsWritten Pos = AttrPosAsWritten::Default); void prettyPrintPragmas(Decl *D); void printDeclType(QualType T, StringRef DeclName, bool 
Pack = false); }; @@ -153,11 +157,14 @@ static QualType GetBaseType(QualType T) { while (!BaseType->isSpecifierType()) { if (const PointerType *PTy = BaseType->getAs<PointerType>()) BaseType = PTy->getPointeeType(); + else if (const ObjCObjectPointerType *OPT = + BaseType->getAs<ObjCObjectPointerType>()) + BaseType = OPT->getPointeeType(); else if (const BlockPointerType *BPy = BaseType->getAs<BlockPointerType>()) BaseType = BPy->getPointeeType(); - else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType)) + else if (const ArrayType *ATy = dyn_cast<ArrayType>(BaseType)) BaseType = ATy->getElementType(); - else if (const FunctionType* FTy = BaseType->getAs<FunctionType>()) + else if (const FunctionType *FTy = BaseType->getAs<FunctionType>()) BaseType = FTy->getReturnType(); else if (const VectorType *VTy = BaseType->getAs<VectorType>()) BaseType = VTy->getElementType(); @@ -231,26 +238,55 @@ raw_ostream& DeclPrinter::Indent(unsigned Indentation) { return Out; } -void DeclPrinter::prettyPrintAttributes(Decl *D) { - if (Policy.PolishForDeclaration) - return; +static DeclPrinter::AttrPosAsWritten getPosAsWritten(const Attr *A, + const Decl *D) { + SourceLocation ALoc = A->getLoc(); + SourceLocation DLoc = D->getLocation(); + const ASTContext &C = D->getASTContext(); + if (ALoc.isInvalid() || DLoc.isInvalid()) + return DeclPrinter::AttrPosAsWritten::Left; + + if (C.getSourceManager().isBeforeInTranslationUnit(ALoc, DLoc)) + return DeclPrinter::AttrPosAsWritten::Left; + + return DeclPrinter::AttrPosAsWritten::Right; +} + +// returns true if an attribute was printed. +bool DeclPrinter::prettyPrintAttributes(const Decl *D, + AttrPosAsWritten Pos /*=Default*/) { + bool hasPrinted = false; if (D->hasAttrs()) { - AttrVec &Attrs = D->getAttrs(); + const AttrVec &Attrs = D->getAttrs(); for (auto *A : Attrs) { if (A->isInherited() || A->isImplicit()) continue; + // Print out the keyword attributes, they aren't regular attributes. 
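// Illustration of the two written positions getPosAsWritten() distinguishes
// with isBeforeInTranslationUnit (hypothetical declarations):
[[nodiscard]] int parse();    // attribute written to the left of the name
int parse2 [[deprecated]] (); // attribute written to the right of the name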
+ if (Policy.PolishForDeclaration && !A->isKeywordAttribute()) + continue; switch (A->getKind()) { #define ATTR(X) #define PRAGMA_SPELLING_ATTR(X) case attr::X: #include "clang/Basic/AttrList.inc" break; default: - A->printPretty(Out, Policy); + AttrPosAsWritten APos = getPosAsWritten(A, D); + assert(APos != AttrPosAsWritten::Default && + "Default not a valid for an attribute location"); + if (Pos == AttrPosAsWritten::Default || Pos == APos) { + if (Pos != AttrPosAsWritten::Left) + Out << ' '; + A->printPretty(Out, Policy); + hasPrinted = true; + if (Pos == AttrPosAsWritten::Left) + Out << ' '; + } break; } } } + return hasPrinted; } void DeclPrinter::prettyPrintPragmas(Decl *D) { @@ -306,6 +342,8 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl, for (const auto *BMInitializer : CDecl->inits()) { if (BMInitializer->isInClassMemberInitializer()) continue; + if (!BMInitializer->isWritten()) + continue; if (!HasInitializerList) { Proto += " : "; @@ -318,15 +356,18 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl, if (BMInitializer->isAnyMemberInitializer()) { FieldDecl *FD = BMInitializer->getAnyMember(); Out << *FD; + } else if (BMInitializer->isDelegatingInitializer()) { + Out << CDecl->getNameAsString(); } else { Out << QualType(BMInitializer->getBaseClass(), 0).getAsString(Policy); } - Out << "("; - if (!BMInitializer->getInit()) { - // Nothing to print - } else { - Expr *Init = BMInitializer->getInit(); + if (Expr *Init = BMInitializer->getInit()) { + bool OutParens = !isa<InitListExpr>(Init); + + if (OutParens) + Out << "("; + if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init)) Init = Tmp->getSubExpr(); @@ -360,8 +401,13 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl, &Context); } } + + if (OutParens) + Out << ")"; + } else { + Out << "()"; } - Out << ")"; + if (BMInitializer->isPackExpansion()) Out << "..."; } @@ -452,21 +498,18 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) { else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->hasBody()) Terminator = nullptr; else if (auto FD = dyn_cast<FunctionDecl>(*D)) { - if (FD->isThisDeclarationADefinition()) + if (FD->doesThisDeclarationHaveABody() && !FD->isDefaulted()) Terminator = nullptr; else Terminator = ";"; } else if (auto TD = dyn_cast<FunctionTemplateDecl>(*D)) { - if (TD->getTemplatedDecl()->isThisDeclarationADefinition()) + if (TD->getTemplatedDecl()->doesThisDeclarationHaveABody()) Terminator = nullptr; else Terminator = ";"; - } else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) || - isa<ObjCImplementationDecl>(*D) || - isa<ObjCInterfaceDecl>(*D) || - isa<ObjCProtocolDecl>(*D) || - isa<ObjCCategoryImplDecl>(*D) || - isa<ObjCCategoryDecl>(*D)) + } else if (isa<NamespaceDecl, LinkageSpecDecl, ObjCImplementationDecl, + ObjCInterfaceDecl, ObjCProtocolDecl, ObjCCategoryImplDecl, + ObjCCategoryDecl, HLSLBufferDecl>(*D)) Terminator = nullptr; else if (isa<EnumConstantDecl>(*D)) { DeclContext::decl_iterator Next = D; @@ -587,13 +630,25 @@ static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out, } EOut << " "; EOut.flush(); - Out << EOut.str(); + Out << Proto; +} + +static void MaybePrintTagKeywordIfSupressingScopes(PrintingPolicy &Policy, + QualType T, + llvm::raw_ostream &Out) { + StringRef prefix = T->isClassType() ? "class " + : T->isStructureType() ? "struct " + : T->isUnionType() ? 
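// Illustration (assumed printer output) of the initializer cases the change
// above handles: a delegating initializer now prints the class name, and a
// braced member initializer is not wrapped in an extra pair of parentheses.
struct Counter {
  int n;
  explicit Counter(int v) : n{v} {} // expected to print as "n{v}"
  Counter() : Counter(0) {}         // expected to print as "Counter(0)"
};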
"union " + : ""; + Out << prefix; } void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { if (!D->getDescribedFunctionTemplate() && - !D->isFunctionTemplateSpecialization()) + !D->isFunctionTemplateSpecialization()) { prettyPrintPragmas(D); + prettyPrintAttributes(D, AttrPosAsWritten::Left); + } if (D->isFunctionTemplateSpecialization()) Out << "template<> "; @@ -622,6 +677,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { if (D->isConstexprSpecified() && !D->isExplicitlyDefaulted()) Out << "constexpr "; if (D->isConsteval()) Out << "consteval "; + else if (D->isImmediateFunction()) + Out << "immediate "; ExplicitSpecifier ExplicitSpec = ExplicitSpecifier::getFromDecl(D); if (ExplicitSpec.isSpecified()) printExplicitSpecifier(ExplicitSpec, Out, Policy, Indentation, Context); @@ -649,16 +706,11 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { llvm::raw_string_ostream POut(Proto); DeclPrinter TArgPrinter(POut, SubPolicy, Context, Indentation); const auto *TArgAsWritten = D->getTemplateSpecializationArgsAsWritten(); - const TemplateParameterList *TPL = D->getTemplateSpecializationInfo() - ->getTemplate() - ->getTemplateParameters(); if (TArgAsWritten && !Policy.PrintCanonicalTypes) - TArgPrinter.printTemplateArguments(TArgAsWritten->arguments(), TPL, - /*TemplOverloaded*/ true); + TArgPrinter.printTemplateArguments(TArgAsWritten->arguments(), nullptr); else if (const TemplateArgumentList *TArgs = D->getTemplateSpecializationArgs()) - TArgPrinter.printTemplateArguments(TArgs->asArray(), TPL, - /*TemplOverloaded*/ true); + TArgPrinter.printTemplateArguments(TArgs->asArray(), nullptr); } QualType Ty = D->getType(); @@ -684,6 +736,10 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { if (FT->isVariadic()) { if (D->getNumParams()) POut << ", "; POut << "..."; + } else if (!D->getNumParams() && !Context.getLangOpts().CPlusPlus) { + // The function has a prototype, so it needs to retain the prototype + // in C. 
+ POut << "void"; } } else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) { for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) { @@ -735,7 +791,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy, Indentation, "\n", &Context); EOut.flush(); - Proto += EOut.str(); Proto += ")"; } } @@ -750,6 +805,10 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { Out << Proto << " -> "; Proto.clear(); } + if (!Policy.SuppressTagKeyword && Policy.SuppressScope && + !Policy.SuppressUnwrittenScope) + MaybePrintTagKeywordIfSupressingScopes(Policy, AFT->getReturnType(), + Out); AFT->getReturnType().print(Out, Policy, Proto); Proto.clear(); } @@ -764,13 +823,18 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { Ty.print(Out, Policy, Proto); } - prettyPrintAttributes(D); + prettyPrintAttributes(D, AttrPosAsWritten::Right); - if (D->isPure()) + if (D->isPureVirtual()) Out << " = 0"; - else if (D->isDeletedAsWritten()) + else if (D->isDeletedAsWritten()) { Out << " = delete"; - else if (D->isExplicitlyDefaulted()) + if (const StringLiteral *M = D->getDeletedMessage()) { + Out << "("; + M->outputString(Out); + Out << ")"; + } + } else if (D->isExplicitlyDefaulted()) Out << " = default"; else if (D->doesThisDeclarationHaveABody()) { if (!Policy.TerseOutput) { @@ -786,11 +850,10 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { Out << ";\n"; } Indentation -= Policy.Indentation; - } else - Out << ' '; + } if (D->getBody()) - D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation, "\n", + D->getBody()->printPrettyControlled(Out, nullptr, SubPolicy, Indentation, "\n", &Context); } else { if (!Policy.TerseOutput && isa<CXXConstructorDecl>(*D)) @@ -858,6 +921,12 @@ void DeclPrinter::VisitLabelDecl(LabelDecl *D) { void DeclPrinter::VisitVarDecl(VarDecl *D) { prettyPrintPragmas(D); + prettyPrintAttributes(D, AttrPosAsWritten::Left); + + if (const auto *Param = dyn_cast<ParmVarDecl>(D); + Param && Param->isExplicitObjectParameter()) + Out << "this "; + QualType T = D->getTypeSourceInfo() ? D->getTypeSourceInfo()->getType() : D->getASTContext().getUnqualifiedObjCPointerType(D->getType()); @@ -890,16 +959,29 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) { } } - printDeclType(T, D->getName()); + if (!Policy.SuppressTagKeyword && Policy.SuppressScope && + !Policy.SuppressUnwrittenScope) + MaybePrintTagKeywordIfSupressingScopes(Policy, T, Out); + + printDeclType(T, (isa<ParmVarDecl>(D) && Policy.CleanUglifiedParameters && + D->getIdentifier()) + ? D->getIdentifier()->deuglifiedName() + : D->getName()); + + prettyPrintAttributes(D, AttrPosAsWritten::Right); + Expr *Init = D->getInit(); if (!Policy.SuppressInitializers && Init) { bool ImplicitInit = false; - if (CXXConstructExpr *Construct = - dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) { + if (D->isCXXForRangeDecl()) { + // FIXME: We should print the range expression instead. 
+ ImplicitInit = true; + } else if (CXXConstructExpr *Construct = + dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) { if (D->getInitStyle() == VarDecl::CallInit && !Construct->isListInitialization()) { ImplicitInit = Construct->getNumArgs() == 0 || - Construct->getArg(0)->isDefaultArgument(); + Construct->getArg(0)->isDefaultArgument(); } } if (!ImplicitInit) { @@ -916,7 +998,6 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) { Out << ")"; } } - prettyPrintAttributes(D); } void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) { @@ -930,6 +1011,11 @@ void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) { Out << ")"; } +void DeclPrinter::VisitTopLevelStmtDecl(TopLevelStmtDecl *D) { + assert(D->getStmt()); + D->getStmt()->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context); +} + void DeclPrinter::VisitImportDecl(ImportDecl *D) { Out << "@import " << D->getImportedModule()->getFullModuleName() << ";\n"; @@ -939,9 +1025,9 @@ void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) { Out << "static_assert("; D->getAssertExpr()->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context); - if (StringLiteral *SL = D->getMessage()) { + if (Expr *E = D->getMessage()) { Out << ", "; - SL->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context); + E->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context); } Out << ")"; } @@ -984,30 +1070,38 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) { // FIXME: add printing of pragma attributes if required. if (!Policy.SuppressSpecifiers && D->isModulePrivate()) Out << "__module_private__ "; - Out << D->getKindName(); - prettyPrintAttributes(D); + Out << D->getKindName() << ' '; - if (D->getIdentifier()) { - Out << ' ' << *D; + // FIXME: Move before printing the decl kind to match the behavior of the + // attribute printing for variables and function where they are printed first. 
+ if (prettyPrintAttributes(D, AttrPosAsWritten::Left)) + Out << ' '; - if (auto S = dyn_cast<ClassTemplateSpecializationDecl>(D)) { - ArrayRef<TemplateArgument> Args = S->getTemplateArgs().asArray(); - if (!Policy.PrintCanonicalTypes) - if (const auto* TSI = S->getTypeAsWritten()) - if (const auto *TST = - dyn_cast<TemplateSpecializationType>(TSI->getType())) - Args = TST->template_arguments(); - printTemplateArguments( - Args, S->getSpecializedTemplate()->getTemplateParameters(), - /*TemplOverloaded*/ false); + if (D->getIdentifier()) { + if (auto *NNS = D->getQualifier()) + NNS->print(Out, Policy); + Out << *D; + + if (auto *S = dyn_cast<ClassTemplateSpecializationDecl>(D)) { + const TemplateParameterList *TParams = + S->getSpecializedTemplate()->getTemplateParameters(); + const ASTTemplateArgumentListInfo *TArgAsWritten = + S->getTemplateArgsAsWritten(); + if (TArgAsWritten && !Policy.PrintCanonicalTypes) + printTemplateArguments(TArgAsWritten->arguments(), TParams); + else + printTemplateArguments(S->getTemplateArgs().asArray(), TParams); } } + prettyPrintAttributes(D, AttrPosAsWritten::Right); + if (D->isCompleteDefinition()) { + Out << ' '; // Print the base classes if (D->getNumBases()) { - Out << " : "; + Out << ": "; for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(), BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) { if (Base != D->bases_begin()) @@ -1026,14 +1120,15 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) { if (Base->isPackExpansion()) Out << "..."; } + Out << ' '; } // Print the class definition // FIXME: Doesn't print access specifiers, e.g., "public:" if (Policy.TerseOutput) { - Out << " {}"; + Out << "{}"; } else { - Out << " {\n"; + Out << "{\n"; VisitDeclContext(D); Indent() << "}"; } @@ -1042,10 +1137,10 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) { void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) { const char *l; - if (D->getLanguage() == LinkageSpecDecl::lang_c) + if (D->getLanguage() == LinkageSpecLanguageIDs::C) l = "C"; else { - assert(D->getLanguage() == LinkageSpecDecl::lang_cxx && + assert(D->getLanguage() == LinkageSpecLanguageIDs::CXX && "unknown language in linkage specification"); l = "C++"; } @@ -1063,6 +1158,10 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params, bool OmitTemplateKW) { assert(Params); + // Don't print invented template parameter lists. 
+ if (!Params->empty() && Params->getParam(0)->isImplicit()) + return; + if (!OmitTemplateKW) Out << "template "; Out << '<'; @@ -1088,40 +1187,46 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params, } Out << '>'; + + if (const Expr *RequiresClause = Params->getRequiresClause()) { + Out << " requires "; + RequiresClause->printPretty(Out, nullptr, Policy, Indentation, "\n", + &Context); + } + if (!OmitTemplateKW) Out << ' '; } void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgument> Args, - const TemplateParameterList *Params, - bool TemplOverloaded) { + const TemplateParameterList *Params) { Out << "<"; for (size_t I = 0, E = Args.size(); I < E; ++I) { if (I) Out << ", "; - if (TemplOverloaded || !Params) + if (!Params) Args[I].print(Policy, Out, /*IncludeType*/ true); else - Args[I].print( - Policy, Out, - TemplateParameterList::shouldIncludeTypeForArgument(Params, I)); + Args[I].print(Policy, Out, + TemplateParameterList::shouldIncludeTypeForArgument( + Policy, Params, I)); } Out << ">"; } void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, - const TemplateParameterList *Params, - bool TemplOverloaded) { + const TemplateParameterList *Params) { Out << "<"; for (size_t I = 0, E = Args.size(); I < E; ++I) { if (I) Out << ", "; - if (TemplOverloaded) + if (!Params) Args[I].getArgument().print(Policy, Out, /*IncludeType*/ true); else Args[I].getArgument().print( Policy, Out, - TemplateParameterList::shouldIncludeTypeForArgument(Params, I)); + TemplateParameterList::shouldIncludeTypeForArgument(Policy, Params, + I)); } Out << ">"; } @@ -1131,15 +1236,22 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) { if (const TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) { - Out << "class"; + if (TTP->wasDeclaredWithTypename()) + Out << "typename"; + else + Out << "class"; if (TTP->isParameterPack()) Out << " ..."; else if (TTP->getDeclName()) Out << ' '; - if (TTP->getDeclName()) - Out << TTP->getDeclName(); + if (TTP->getDeclName()) { + if (Policy.CleanUglifiedParameters && TTP->getIdentifier()) + Out << TTP->getIdentifier()->deuglifiedName(); + else + Out << TTP->getDeclName(); + } } else if (auto *TD = D->getTemplatedDecl()) Visit(TD); else if (const auto *Concept = dyn_cast<ConceptDecl>(D)) { @@ -1191,6 +1303,7 @@ void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) { if (D->isThisDeclarationADefinition()) Out << ";"; Out << "\n"; + Indent(); Visit(I); } } @@ -1357,6 +1470,11 @@ void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) { return; } bool eolnOut = false; + if (OID->hasAttrs()) { + prettyPrintAttributes(OID); + Out << "\n"; + } + Out << "@interface " << I; if (auto TypeParams = OID->getTypeParamListAsWritten()) { @@ -1572,7 +1690,7 @@ void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) { std::string TypeStr = PDecl->getASTContext().getUnqualifiedObjCPointerType(T). 
getAsString(Policy); Out << ' ' << TypeStr; - if (!StringRef(TypeStr).endswith("*")) + if (!StringRef(TypeStr).ends_with("*")) Out << ' '; Out << *PDecl; if (Policy.PolishForDeclaration) @@ -1645,6 +1763,21 @@ void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) { } } +void DeclPrinter::VisitHLSLBufferDecl(HLSLBufferDecl *D) { + if (D->isCBuffer()) + Out << "cbuffer "; + else + Out << "tbuffer "; + + Out << *D; + + prettyPrintAttributes(D); + + Out << " {\n"; + VisitDeclContext(D); + Indent() << "}"; +} + void DeclPrinter::VisitOMPAllocateDecl(OMPAllocateDecl *D) { Out << "#pragma omp allocate"; if (!D->varlist_empty()) { @@ -1658,10 +1791,11 @@ void DeclPrinter::VisitOMPAllocateDecl(OMPAllocateDecl *D) { Out << ")"; } if (!D->clauselist_empty()) { - Out << " "; OMPClausePrinter Printer(Out, Policy); - for (OMPClause *C : D->clauselists()) + for (OMPClause *C : D->clauselists()) { + Out << " "; Printer.Visit(C); + } } } @@ -1684,7 +1818,7 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) { Out << OpName; } else { assert(D->getDeclName().isIdentifier()); - D->printName(Out); + D->printName(Out, Policy); } Out << " : "; D->getType().print(Out, Policy); @@ -1694,17 +1828,17 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) { if (auto *Init = D->getInitializer()) { Out << " initializer("; switch (D->getInitializerKind()) { - case OMPDeclareReductionDecl::DirectInit: + case OMPDeclareReductionInitKind::Direct: Out << "omp_priv("; break; - case OMPDeclareReductionDecl::CopyInit: + case OMPDeclareReductionInitKind::Copy: Out << "omp_priv = "; break; - case OMPDeclareReductionDecl::CallInit: + case OMPDeclareReductionInitKind::Call: break; } Init->printPretty(Out, nullptr, Policy, 0, "\n", &Context); - if (D->getInitializerKind() == OMPDeclareReductionDecl::DirectInit) + if (D->getInitializerKind() == OMPDeclareReductionInitKind::Direct) Out << ")"; Out << ")"; } @@ -1714,7 +1848,7 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) { void DeclPrinter::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) { if (!D->isInvalidDecl()) { Out << "#pragma omp declare mapper ("; - D->printName(Out); + D->printName(Out, Policy); Out << " : "; D->getType().print(Out, Policy); Out << " "; @@ -1747,12 +1881,17 @@ void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) { else if (TTP->getDeclName()) Out << ' '; - if (TTP->getDeclName()) - Out << TTP->getDeclName(); + if (TTP->getDeclName()) { + if (Policy.CleanUglifiedParameters && TTP->getIdentifier()) + Out << TTP->getIdentifier()->deuglifiedName(); + else + Out << TTP->getDeclName(); + } if (TTP->hasDefaultArgument()) { Out << " = "; - Out << TTP->getDefaultArgument().getAsString(Policy); + TTP->getDefaultArgument().getArgument().print(Policy, Out, + /*IncludeType=*/false); } } @@ -1760,12 +1899,13 @@ void DeclPrinter::VisitNonTypeTemplateParmDecl( const NonTypeTemplateParmDecl *NTTP) { StringRef Name; if (IdentifierInfo *II = NTTP->getIdentifier()) - Name = II->getName(); + Name = + Policy.CleanUglifiedParameters ? 
II->deuglifiedName() : II->getName(); printDeclType(NTTP->getType(), Name, NTTP->isParameterPack()); if (NTTP->hasDefaultArgument()) { Out << " = "; - NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy, Indentation, - "\n", &Context); + NTTP->getDefaultArgument().getArgument().print(Policy, Out, + /*IncludeType=*/false); } } diff --git a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp index ec8b00a9eb7d..722c7fcf0b0d 100755 --- a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp @@ -26,8 +26,8 @@ #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FoldingSet.h" -#include "llvm/ADT/None.h" #include "llvm/ADT/PointerUnion.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -35,6 +35,7 @@ #include <cassert> #include <cstdint> #include <memory> +#include <optional> #include <utility> using namespace clang; @@ -77,7 +78,7 @@ TemplateParameterList::TemplateParameterList(const ASTContext& C, if (TTP->hasTypeConstraint()) HasConstrainedParameters = true; } else { - llvm_unreachable("unexpcted template parameter type"); + llvm_unreachable("unexpected template parameter type"); } // FIXME: If a default argument contains an unexpanded parameter pack, the // template parameter list does too. @@ -126,11 +127,44 @@ TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc, RAngleLoc, RequiresClause); } +void TemplateParameterList::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &C) const { + const Expr *RC = getRequiresClause(); + ID.AddBoolean(RC != nullptr); + if (RC) + RC->Profile(ID, C, /*Canonical=*/true); + ID.AddInteger(size()); + for (NamedDecl *D : *this) { + if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) { + ID.AddInteger(0); + ID.AddBoolean(NTTP->isParameterPack()); + NTTP->getType().getCanonicalType().Profile(ID); + ID.AddBoolean(NTTP->hasPlaceholderTypeConstraint()); + if (const Expr *E = NTTP->getPlaceholderTypeConstraint()) + E->Profile(ID, C, /*Canonical=*/true); + continue; + } + if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) { + ID.AddInteger(1); + ID.AddBoolean(TTP->isParameterPack()); + ID.AddBoolean(TTP->hasTypeConstraint()); + if (const TypeConstraint *TC = TTP->getTypeConstraint()) + TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, + /*Canonical=*/true); + continue; + } + const auto *TTP = cast<TemplateTemplateParmDecl>(D); + ID.AddInteger(2); + ID.AddBoolean(TTP->isParameterPack()); + TTP->getTemplateParameters()->Profile(ID, C); + } +} + unsigned TemplateParameterList::getMinRequiredArguments() const { unsigned NumRequiredArgs = 0; for (const NamedDecl *P : asArray()) { if (P->isTemplateParameterPack()) { - if (Optional<unsigned> Expansions = getExpandedPackSize(P)) { + if (std::optional<unsigned> Expansions = getExpandedPackSize(P)) { NumRequiredArgs += *Expansions; continue; } @@ -165,14 +199,20 @@ unsigned TemplateParameterList::getDepth() const { return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth(); } -static void AdoptTemplateParameterList(TemplateParameterList *Params, +static bool AdoptTemplateParameterList(TemplateParameterList *Params, DeclContext *Owner) { + bool Invalid = false; for (NamedDecl *P : *Params) { P->setDeclContext(Owner); if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(P)) - 
AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner); + if (AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner)) + Invalid = true; + + if (P->isInvalidDecl()) + Invalid = true; } + return Invalid; } void TemplateParameterList:: @@ -196,8 +236,9 @@ bool TemplateParameterList::hasAssociatedConstraints() const { } bool TemplateParameterList::shouldIncludeTypeForArgument( - const TemplateParameterList *TPL, unsigned Idx) { - if (!TPL || Idx >= TPL->size()) + const PrintingPolicy &Policy, const TemplateParameterList *TPL, + unsigned Idx) { + if (!TPL || Idx >= TPL->size() || Policy.AlwaysIncludeTypeForTemplateArgument) return true; const NamedDecl *TemplParam = TPL->getParam(Idx); if (const auto *ParamValueDecl = @@ -242,6 +283,16 @@ bool TemplateDecl::hasAssociatedConstraints() const { return false; } +bool TemplateDecl::isTypeAlias() const { + switch (getKind()) { + case TemplateDecl::TypeAliasTemplate: + case TemplateDecl::BuiltinTemplate: + return true; + default: + return false; + }; +} + //===----------------------------------------------------------------------===// // RedeclarableTemplateDecl Implementation //===----------------------------------------------------------------------===// @@ -286,9 +337,10 @@ void RedeclarableTemplateDecl::loadLazySpecializationsImpl() const { CommonBase *CommonBasePtr = getMostRecentDecl()->getCommonPtr(); if (CommonBasePtr->LazySpecializations) { ASTContext &Context = getASTContext(); - uint32_t *Specs = CommonBasePtr->LazySpecializations; + GlobalDeclID *Specs = CommonBasePtr->LazySpecializations; CommonBasePtr->LazySpecializations = nullptr; - for (uint32_t I = 0, N = *Specs++; I != N; ++I) + unsigned SpecSize = (*Specs++).getRawValue(); + for (unsigned I = 0; I != SpecSize; ++I) (void)Context.getExternalSource()->GetExternalDecl(Specs[I]); } } @@ -335,22 +387,39 @@ void RedeclarableTemplateDecl::addSpecializationImpl( SETraits::getDecl(Entry)); } +ArrayRef<TemplateArgument> RedeclarableTemplateDecl::getInjectedTemplateArgs() { + TemplateParameterList *Params = getTemplateParameters(); + auto *CommonPtr = getCommonPtr(); + if (!CommonPtr->InjectedArgs) { + auto &Context = getASTContext(); + SmallVector<TemplateArgument, 16> TemplateArgs; + Context.getInjectedTemplateArgs(Params, TemplateArgs); + CommonPtr->InjectedArgs = + new (Context) TemplateArgument[TemplateArgs.size()]; + std::copy(TemplateArgs.begin(), TemplateArgs.end(), + CommonPtr->InjectedArgs); + } + + return llvm::ArrayRef(CommonPtr->InjectedArgs, Params->size()); +} + //===----------------------------------------------------------------------===// // FunctionTemplateDecl Implementation //===----------------------------------------------------------------------===// -FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C, - DeclContext *DC, - SourceLocation L, - DeclarationName Name, - TemplateParameterList *Params, - NamedDecl *Decl) { - AdoptTemplateParameterList(Params, cast<DeclContext>(Decl)); - return new (C, DC) FunctionTemplateDecl(C, DC, L, Name, Params, Decl); +FunctionTemplateDecl * +FunctionTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, + DeclarationName Name, + TemplateParameterList *Params, NamedDecl *Decl) { + bool Invalid = AdoptTemplateParameterList(Params, cast<DeclContext>(Decl)); + auto *TD = new (C, DC) FunctionTemplateDecl(C, DC, L, Name, Params, Decl); + if (Invalid) + TD->setInvalidDecl(); + return TD; } -FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { 
+FunctionTemplateDecl * +FunctionTemplateDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) FunctionTemplateDecl(C, nullptr, SourceLocation(), DeclarationName(), nullptr, nullptr); } @@ -384,22 +453,6 @@ void FunctionTemplateDecl::addSpecialization( InsertPos); } -ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() { - TemplateParameterList *Params = getTemplateParameters(); - Common *CommonPtr = getCommonPtr(); - if (!CommonPtr->InjectedArgs) { - auto &Context = getASTContext(); - SmallVector<TemplateArgument, 16> TemplateArgs; - Context.getInjectedTemplateArgs(Params, TemplateArgs); - CommonPtr->InjectedArgs = - new (Context) TemplateArgument[TemplateArgs.size()]; - std::copy(TemplateArgs.begin(), TemplateArgs.end(), - CommonPtr->InjectedArgs); - } - - return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size()); -} - void FunctionTemplateDecl::mergePrevDecl(FunctionTemplateDecl *Prev) { using Base = RedeclarableTemplateDecl; @@ -438,19 +491,20 @@ void FunctionTemplateDecl::mergePrevDecl(FunctionTemplateDecl *Prev) { // ClassTemplateDecl Implementation //===----------------------------------------------------------------------===// -ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C, - DeclContext *DC, +ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name, TemplateParameterList *Params, NamedDecl *Decl) { - AdoptTemplateParameterList(Params, cast<DeclContext>(Decl)); - - return new (C, DC) ClassTemplateDecl(C, DC, L, Name, Params, Decl); + bool Invalid = AdoptTemplateParameterList(Params, cast<DeclContext>(Decl)); + auto *TD = new (C, DC) ClassTemplateDecl(C, DC, L, Name, Params, Decl); + if (Invalid) + TD->setInvalidDecl(); + return TD; } ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) ClassTemplateDecl(C, nullptr, SourceLocation(), DeclarationName(), nullptr, nullptr); } @@ -497,44 +551,13 @@ ClassTemplateDecl::findPartialSpecialization( TPL); } -static void ProfileTemplateParameterList(ASTContext &C, - llvm::FoldingSetNodeID &ID, const TemplateParameterList *TPL) { - const Expr *RC = TPL->getRequiresClause(); - ID.AddBoolean(RC != nullptr); - if (RC) - RC->Profile(ID, C, /*Canonical=*/true); - ID.AddInteger(TPL->size()); - for (NamedDecl *D : *TPL) { - if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) { - ID.AddInteger(0); - ID.AddBoolean(NTTP->isParameterPack()); - NTTP->getType().getCanonicalType().Profile(ID); - continue; - } - if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) { - ID.AddInteger(1); - ID.AddBoolean(TTP->isParameterPack()); - ID.AddBoolean(TTP->hasTypeConstraint()); - if (const TypeConstraint *TC = TTP->getTypeConstraint()) - TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, - /*Canonical=*/true); - continue; - } - const auto *TTP = cast<TemplateTemplateParmDecl>(D); - ID.AddInteger(2); - ID.AddBoolean(TTP->isParameterPack()); - ProfileTemplateParameterList(C, ID, TTP->getTemplateParameters()); - } -} - -void -ClassTemplatePartialSpecializationDecl::Profile(llvm::FoldingSetNodeID &ID, - ArrayRef<TemplateArgument> TemplateArgs, TemplateParameterList *TPL, - ASTContext &Context) { +void ClassTemplatePartialSpecializationDecl::Profile( + llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs, + TemplateParameterList *TPL, const ASTContext &Context) { ID.AddInteger(TemplateArgs.size()); for (const TemplateArgument &TemplateArg : 
TemplateArgs) TemplateArg.Profile(ID, Context); - ProfileTemplateParameterList(Context, ID, TPL); + TPL->Profile(ID, Context); } void ClassTemplateDecl::AddPartialSpecialization( @@ -604,9 +627,10 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() { TemplateParameterList *Params = getTemplateParameters(); SmallVector<TemplateArgument, 16> TemplateArgs; Context.getInjectedTemplateArgs(Params, TemplateArgs); - CommonPtr->InjectedClassNameType - = Context.getTemplateSpecializationType(TemplateName(this), - TemplateArgs); + TemplateName Name = Context.getQualifiedTemplateName( + /*NNS=*/nullptr, /*TemplateKeyword=*/false, TemplateName(this)); + CommonPtr->InjectedClassNameType = + Context.getTemplateSpecializationType(Name, TemplateArgs); return CommonPtr->InjectedClassNameType; } @@ -614,13 +638,11 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() { // TemplateTypeParm Allocation/Deallocation Method Implementations //===----------------------------------------------------------------------===// -TemplateTypeParmDecl * -TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC, - SourceLocation KeyLoc, SourceLocation NameLoc, - unsigned D, unsigned P, IdentifierInfo *Id, - bool Typename, bool ParameterPack, - bool HasTypeConstraint, - Optional<unsigned> NumExpanded) { +TemplateTypeParmDecl *TemplateTypeParmDecl::Create( + const ASTContext &C, DeclContext *DC, SourceLocation KeyLoc, + SourceLocation NameLoc, unsigned D, unsigned P, IdentifierInfo *Id, + bool Typename, bool ParameterPack, bool HasTypeConstraint, + std::optional<unsigned> NumExpanded) { auto *TTPDecl = new (C, DC, additionalSizeToAlloc<TypeConstraint>(HasTypeConstraint ? 1 : 0)) @@ -632,39 +654,46 @@ TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC, } TemplateTypeParmDecl * -TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) { - return new (C, ID) TemplateTypeParmDecl(nullptr, SourceLocation(), - SourceLocation(), nullptr, false, - false, None); +TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, GlobalDeclID ID) { + return new (C, ID) + TemplateTypeParmDecl(nullptr, SourceLocation(), SourceLocation(), nullptr, + false, false, std::nullopt); } TemplateTypeParmDecl * -TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID, +TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, GlobalDeclID ID, bool HasTypeConstraint) { return new (C, ID, additionalSizeToAlloc<TypeConstraint>(HasTypeConstraint ? 1 : 0)) - TemplateTypeParmDecl(nullptr, SourceLocation(), SourceLocation(), - nullptr, false, HasTypeConstraint, None); + TemplateTypeParmDecl(nullptr, SourceLocation(), SourceLocation(), nullptr, + false, HasTypeConstraint, std::nullopt); } SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const { - return hasDefaultArgument() - ? getDefaultArgumentInfo()->getTypeLoc().getBeginLoc() - : SourceLocation(); + return hasDefaultArgument() ? getDefaultArgument().getLocation() + : SourceLocation(); } SourceRange TemplateTypeParmDecl::getSourceRange() const { if (hasDefaultArgument() && !defaultArgumentWasInherited()) return SourceRange(getBeginLoc(), - getDefaultArgumentInfo()->getTypeLoc().getEndLoc()); + getDefaultArgument().getSourceRange().getEnd()); // TypeDecl::getSourceRange returns a range containing name location, which is // wrong for unnamed template parameters. 
e.g: // it will return <[[typename>]] instead of <[[typename]]> - else if (getDeclName().isEmpty()) + if (getDeclName().isEmpty()) return SourceRange(getBeginLoc()); return TypeDecl::getSourceRange(); } +void TemplateTypeParmDecl::setDefaultArgument( + const ASTContext &C, const TemplateArgumentLoc &DefArg) { + if (DefArg.getArgument().isNull()) + DefaultArgument.set(nullptr); + else + DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg)); +} + unsigned TemplateTypeParmDecl::getDepth() const { return getTypeForDecl()->castAs<TemplateTypeParmType>()->getDepth(); } @@ -677,17 +706,15 @@ bool TemplateTypeParmDecl::isParameterPack() const { return getTypeForDecl()->castAs<TemplateTypeParmType>()->isParameterPack(); } -void TemplateTypeParmDecl::setTypeConstraint(NestedNameSpecifierLoc NNS, - DeclarationNameInfo NameInfo, NamedDecl *FoundDecl, ConceptDecl *CD, - const ASTTemplateArgumentListInfo *ArgsAsWritten, - Expr *ImmediatelyDeclaredConstraint) { +void TemplateTypeParmDecl::setTypeConstraint( + ConceptReference *Loc, Expr *ImmediatelyDeclaredConstraint) { assert(HasTypeConstraint && "HasTypeConstraint=true must be passed at construction in order to " "call setTypeConstraint"); assert(!TypeConstraintInitialized && "TypeConstraint was already initialized!"); - new (getTrailingObjects<TypeConstraint>()) TypeConstraint(NNS, NameInfo, - FoundDecl, CD, ArgsAsWritten, ImmediatelyDeclaredConstraint); + new (getTrailingObjects<TypeConstraint>()) + TypeConstraint(Loc, ImmediatelyDeclaredConstraint); TypeConstraintInitialized = true; } @@ -697,7 +724,7 @@ void TemplateTypeParmDecl::setTypeConstraint(NestedNameSpecifierLoc NNS, NonTypeTemplateParmDecl::NonTypeTemplateParmDecl( DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, unsigned D, - unsigned P, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, + unsigned P, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, ArrayRef<QualType> ExpandedTypes, ArrayRef<TypeSourceInfo *> ExpandedTInfos) : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc), TemplateParmPosition(D, P), ParameterPack(true), @@ -712,12 +739,10 @@ NonTypeTemplateParmDecl::NonTypeTemplateParmDecl( } } -NonTypeTemplateParmDecl * -NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, - SourceLocation StartLoc, SourceLocation IdLoc, - unsigned D, unsigned P, IdentifierInfo *Id, - QualType T, bool ParameterPack, - TypeSourceInfo *TInfo) { +NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create( + const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id, + QualType T, bool ParameterPack, TypeSourceInfo *TInfo) { AutoType *AT = C.getLangOpts().CPlusPlus20 ? 
T->getContainedAutoType() : nullptr; return new (C, DC, @@ -730,7 +755,7 @@ NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create( const ASTContext &C, DeclContext *DC, SourceLocation StartLoc, - SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id, + SourceLocation IdLoc, unsigned D, unsigned P, const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, ArrayRef<QualType> ExpandedTypes, ArrayRef<TypeSourceInfo *> ExpandedTInfos) { AutoType *AT = TInfo->getType()->getContainedAutoType(); @@ -743,7 +768,7 @@ NonTypeTemplateParmDecl *NonTypeTemplateParmDecl::Create( } NonTypeTemplateParmDecl * -NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, +NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, bool HasTypeConstraint) { return new (C, ID, additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>, @@ -754,16 +779,16 @@ NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, } NonTypeTemplateParmDecl * -NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, +NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, unsigned NumExpandedTypes, bool HasTypeConstraint) { auto *NTTP = - new (C, ID, additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>, - Expr *>( - NumExpandedTypes, HasTypeConstraint ? 1 : 0)) + new (C, ID, + additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>, Expr *>( + NumExpandedTypes, HasTypeConstraint ? 1 : 0)) NonTypeTemplateParmDecl(nullptr, SourceLocation(), SourceLocation(), - 0, 0, nullptr, QualType(), nullptr, None, - None); + 0, 0, nullptr, QualType(), nullptr, + std::nullopt, std::nullopt); NTTP->NumExpandedTypes = NumExpandedTypes; return NTTP; } @@ -771,14 +796,21 @@ NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, SourceRange NonTypeTemplateParmDecl::getSourceRange() const { if (hasDefaultArgument() && !defaultArgumentWasInherited()) return SourceRange(getOuterLocStart(), - getDefaultArgument()->getSourceRange().getEnd()); + getDefaultArgument().getSourceRange().getEnd()); return DeclaratorDecl::getSourceRange(); } SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const { - return hasDefaultArgument() - ? getDefaultArgument()->getSourceRange().getBegin() - : SourceLocation(); + return hasDefaultArgument() ? 
getDefaultArgument().getSourceRange().getBegin() + : SourceLocation(); +} + +void NonTypeTemplateParmDecl::setDefaultArgument( + const ASTContext &C, const TemplateArgumentLoc &DefArg) { + if (DefArg.getArgument().isNull()) + DefaultArgument.set(nullptr); + else + DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg)); } //===----------------------------------------------------------------------===// @@ -789,10 +821,10 @@ void TemplateTemplateParmDecl::anchor() {} TemplateTemplateParmDecl::TemplateTemplateParmDecl( DeclContext *DC, SourceLocation L, unsigned D, unsigned P, - IdentifierInfo *Id, TemplateParameterList *Params, + IdentifierInfo *Id, bool Typename, TemplateParameterList *Params, ArrayRef<TemplateParameterList *> Expansions) : TemplateDecl(TemplateTemplateParm, DC, L, Id, Params), - TemplateParmPosition(D, P), ParameterPack(true), + TemplateParmPosition(D, P), Typename(Typename), ParameterPack(true), ExpandedParameterPack(true), NumExpandedParams(Expansions.size()) { if (!Expansions.empty()) std::uninitialized_copy(Expansions.begin(), Expansions.end(), @@ -803,35 +835,35 @@ TemplateTemplateParmDecl * TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D, unsigned P, bool ParameterPack, IdentifierInfo *Id, - TemplateParameterList *Params) { + bool Typename, TemplateParameterList *Params) { return new (C, DC) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id, - Params); + Typename, Params); } TemplateTemplateParmDecl * TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC, SourceLocation L, unsigned D, unsigned P, - IdentifierInfo *Id, + IdentifierInfo *Id, bool Typename, TemplateParameterList *Params, ArrayRef<TemplateParameterList *> Expansions) { return new (C, DC, additionalSizeToAlloc<TemplateParameterList *>(Expansions.size())) - TemplateTemplateParmDecl(DC, L, D, P, Id, Params, Expansions); + TemplateTemplateParmDecl(DC, L, D, P, Id, Typename, Params, Expansions); } TemplateTemplateParmDecl * -TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, - false, nullptr, nullptr); + false, nullptr, false, nullptr); } TemplateTemplateParmDecl * -TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID, +TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID, unsigned NumExpansions) { auto *TTP = new (C, ID, additionalSizeToAlloc<TemplateParameterList *>(NumExpansions)) TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, nullptr, - nullptr, None); + false, nullptr, std::nullopt); TTP->NumExpandedParams = NumExpansions; return TTP; } @@ -853,8 +885,7 @@ void TemplateTemplateParmDecl::setDefaultArgument( // TemplateArgumentList Implementation //===----------------------------------------------------------------------===// TemplateArgumentList::TemplateArgumentList(ArrayRef<TemplateArgument> Args) - : Arguments(getTrailingObjects<TemplateArgument>()), - NumArguments(Args.size()) { + : NumArguments(Args.size()) { std::uninitialized_copy(Args.begin(), Args.end(), getTrailingObjects<TemplateArgument>()); } @@ -868,7 +899,7 @@ TemplateArgumentList::CreateCopy(ASTContext &Context, FunctionTemplateSpecializationInfo *FunctionTemplateSpecializationInfo::Create( ASTContext &C, FunctionDecl *FD, FunctionTemplateDecl *Template, - TemplateSpecializationKind TSK, const TemplateArgumentList *TemplateArgs, + 
TemplateSpecializationKind TSK, TemplateArgumentList *TemplateArgs, const TemplateArgumentListInfo *TemplateArgsAsWritten, SourceLocation POI, MemberSpecializationInfo *MSInfo) { const ASTTemplateArgumentListInfo *ArgsAsWritten = nullptr; @@ -902,7 +933,7 @@ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK, ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(ASTContext &C, Kind DK) - : CXXRecordDecl(DK, TTK_Struct, C, nullptr, SourceLocation(), + : CXXRecordDecl(DK, TagTypeKind::Struct, C, nullptr, SourceLocation(), SourceLocation(), nullptr, nullptr), SpecializationKind(TSK_Undeclared) {} @@ -920,13 +951,21 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK, SpecializedTemplate, Args, PrevDecl); Result->setMayHaveOutOfDateDef(false); + // If the template decl is incomplete, copy the external lexical storage from + // the base template. This allows instantiations of incomplete types to + // complete using the external AST if the template's declaration came from an + // external AST. + if (!SpecializedTemplate->getTemplatedDecl()->isCompleteDefinition()) + Result->setHasExternalLexicalStorage( + SpecializedTemplate->getTemplatedDecl()->hasExternalLexicalStorage()); + Context.getTypeDeclType(Result, PrevDecl); return Result; } ClassTemplateSpecializationDecl * ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { auto *Result = new (C, ID) ClassTemplateSpecializationDecl(C, ClassTemplateSpecialization); Result->setMayHaveOutOfDateDef(false); @@ -961,41 +1000,67 @@ ClassTemplateSpecializationDecl::getSpecializedTemplate() const { SourceRange ClassTemplateSpecializationDecl::getSourceRange() const { - if (ExplicitInfo) { - SourceLocation Begin = getTemplateKeywordLoc(); - if (Begin.isValid()) { - // Here we have an explicit (partial) specialization or instantiation. - assert(getSpecializationKind() == TSK_ExplicitSpecialization || - getSpecializationKind() == TSK_ExplicitInstantiationDeclaration || - getSpecializationKind() == TSK_ExplicitInstantiationDefinition); - if (getExternLoc().isValid()) - Begin = getExternLoc(); - SourceLocation End = getBraceRange().getEnd(); - if (End.isInvalid()) - End = getTypeAsWritten()->getTypeLoc().getEndLoc(); - return SourceRange(Begin, End); - } - // An implicit instantiation of a class template partial specialization - // uses ExplicitInfo to record the TypeAsWritten, but the source - // locations should be retrieved from the instantiation pattern. - using CTPSDecl = ClassTemplatePartialSpecializationDecl; - auto *ctpsd = const_cast<CTPSDecl *>(cast<CTPSDecl>(this)); - CTPSDecl *inst_from = ctpsd->getInstantiatedFromMember(); - assert(inst_from != nullptr); - return inst_from->getSourceRange(); - } - else { - // No explicit info available. 
+ switch (getSpecializationKind()) { + case TSK_Undeclared: + case TSK_ImplicitInstantiation: { llvm::PointerUnion<ClassTemplateDecl *, ClassTemplatePartialSpecializationDecl *> - inst_from = getInstantiatedFrom(); - if (inst_from.isNull()) - return getSpecializedTemplate()->getSourceRange(); - if (const auto *ctd = inst_from.dyn_cast<ClassTemplateDecl *>()) - return ctd->getSourceRange(); - return inst_from.get<ClassTemplatePartialSpecializationDecl *>() - ->getSourceRange(); + Pattern = getSpecializedTemplateOrPartial(); + assert(!Pattern.isNull() && + "Class template specialization without pattern?"); + if (const auto *CTPSD = + Pattern.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) + return CTPSD->getSourceRange(); + return Pattern.get<ClassTemplateDecl *>()->getSourceRange(); + } + case TSK_ExplicitSpecialization: { + SourceRange Range = CXXRecordDecl::getSourceRange(); + if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten(); + !isThisDeclarationADefinition() && Args) + Range.setEnd(Args->getRAngleLoc()); + return Range; + } + case TSK_ExplicitInstantiationDeclaration: + case TSK_ExplicitInstantiationDefinition: { + SourceRange Range = CXXRecordDecl::getSourceRange(); + if (SourceLocation ExternKW = getExternKeywordLoc(); ExternKW.isValid()) + Range.setBegin(ExternKW); + else if (SourceLocation TemplateKW = getTemplateKeywordLoc(); + TemplateKW.isValid()) + Range.setBegin(TemplateKW); + if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten()) + Range.setEnd(Args->getRAngleLoc()); + return Range; + } } + llvm_unreachable("unhandled template specialization kind"); +} + +void ClassTemplateSpecializationDecl::setExternKeywordLoc(SourceLocation Loc) { + auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>(); + if (!Info) { + // Don't allocate if the location is invalid. + if (Loc.isInvalid()) + return; + Info = new (getASTContext()) ExplicitInstantiationInfo; + Info->TemplateArgsAsWritten = getTemplateArgsAsWritten(); + ExplicitInfo = Info; + } + Info->ExternKeywordLoc = Loc; +} + +void ClassTemplateSpecializationDecl::setTemplateKeywordLoc( + SourceLocation Loc) { + auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>(); + if (!Info) { + // Don't allocate if the location is invalid. 
+ if (Loc.isInvalid()) + return; + Info = new (getASTContext()) ExplicitInstantiationInfo; + Info->TemplateArgsAsWritten = getTemplateArgsAsWritten(); + ExplicitInfo = Info; + } + Info->TemplateKeywordLoc = Loc; } //===----------------------------------------------------------------------===// @@ -1005,12 +1070,14 @@ ConceptDecl *ConceptDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name, TemplateParameterList *Params, Expr *ConstraintExpr) { - AdoptTemplateParameterList(Params, DC); - return new (C, DC) ConceptDecl(DC, L, Name, Params, ConstraintExpr); + bool Invalid = AdoptTemplateParameterList(Params, DC); + auto *TD = new (C, DC) ConceptDecl(DC, L, Name, Params, ConstraintExpr); + if (Invalid) + TD->setInvalidDecl(); + return TD; } -ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { +ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { ConceptDecl *Result = new (C, ID) ConceptDecl(nullptr, SourceLocation(), DeclarationName(), nullptr, nullptr); @@ -1019,46 +1086,71 @@ ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C, } //===----------------------------------------------------------------------===// +// ImplicitConceptSpecializationDecl Implementation +//===----------------------------------------------------------------------===// +ImplicitConceptSpecializationDecl::ImplicitConceptSpecializationDecl( + DeclContext *DC, SourceLocation SL, + ArrayRef<TemplateArgument> ConvertedArgs) + : Decl(ImplicitConceptSpecialization, DC, SL), + NumTemplateArgs(ConvertedArgs.size()) { + setTemplateArguments(ConvertedArgs); +} + +ImplicitConceptSpecializationDecl::ImplicitConceptSpecializationDecl( + EmptyShell Empty, unsigned NumTemplateArgs) + : Decl(ImplicitConceptSpecialization, Empty), + NumTemplateArgs(NumTemplateArgs) {} + +ImplicitConceptSpecializationDecl *ImplicitConceptSpecializationDecl::Create( + const ASTContext &C, DeclContext *DC, SourceLocation SL, + ArrayRef<TemplateArgument> ConvertedArgs) { + return new (C, DC, + additionalSizeToAlloc<TemplateArgument>(ConvertedArgs.size())) + ImplicitConceptSpecializationDecl(DC, SL, ConvertedArgs); +} + +ImplicitConceptSpecializationDecl * +ImplicitConceptSpecializationDecl::CreateDeserialized( + const ASTContext &C, GlobalDeclID ID, unsigned NumTemplateArgs) { + return new (C, ID, additionalSizeToAlloc<TemplateArgument>(NumTemplateArgs)) + ImplicitConceptSpecializationDecl(EmptyShell{}, NumTemplateArgs); +} + +void ImplicitConceptSpecializationDecl::setTemplateArguments( + ArrayRef<TemplateArgument> Converted) { + assert(Converted.size() == NumTemplateArgs); + std::uninitialized_copy(Converted.begin(), Converted.end(), + getTrailingObjects<TemplateArgument>()); +} + +//===----------------------------------------------------------------------===// // ClassTemplatePartialSpecializationDecl Implementation //===----------------------------------------------------------------------===// void ClassTemplatePartialSpecializationDecl::anchor() {} -ClassTemplatePartialSpecializationDecl:: -ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK, - DeclContext *DC, - SourceLocation StartLoc, - SourceLocation IdLoc, - TemplateParameterList *Params, - ClassTemplateDecl *SpecializedTemplate, - ArrayRef<TemplateArgument> Args, - const ASTTemplateArgumentListInfo *ArgInfos, - ClassTemplatePartialSpecializationDecl *PrevDecl) - : ClassTemplateSpecializationDecl(Context, - ClassTemplatePartialSpecialization, - TK, DC, StartLoc, IdLoc, - 
SpecializedTemplate, Args, PrevDecl), - TemplateParams(Params), ArgsAsWritten(ArgInfos), - InstantiatedFromMember(nullptr, false) { - AdoptTemplateParameterList(Params, this); +ClassTemplatePartialSpecializationDecl::ClassTemplatePartialSpecializationDecl( + ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, TemplateParameterList *Params, + ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args, + ClassTemplatePartialSpecializationDecl *PrevDecl) + : ClassTemplateSpecializationDecl( + Context, ClassTemplatePartialSpecialization, TK, DC, StartLoc, IdLoc, + SpecializedTemplate, Args, PrevDecl), + TemplateParams(Params), InstantiatedFromMember(nullptr, false) { + if (AdoptTemplateParameterList(Params, this)) + setInvalidDecl(); } ClassTemplatePartialSpecializationDecl * -ClassTemplatePartialSpecializationDecl:: -Create(ASTContext &Context, TagKind TK,DeclContext *DC, - SourceLocation StartLoc, SourceLocation IdLoc, - TemplateParameterList *Params, - ClassTemplateDecl *SpecializedTemplate, - ArrayRef<TemplateArgument> Args, - const TemplateArgumentListInfo &ArgInfos, - QualType CanonInjectedType, - ClassTemplatePartialSpecializationDecl *PrevDecl) { - const ASTTemplateArgumentListInfo *ASTArgInfos = - ASTTemplateArgumentListInfo::Create(Context, ArgInfos); - - auto *Result = new (Context, DC) - ClassTemplatePartialSpecializationDecl(Context, TK, DC, StartLoc, IdLoc, - Params, SpecializedTemplate, Args, - ASTArgInfos, PrevDecl); +ClassTemplatePartialSpecializationDecl::Create( + ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation StartLoc, + SourceLocation IdLoc, TemplateParameterList *Params, + ClassTemplateDecl *SpecializedTemplate, ArrayRef<TemplateArgument> Args, + QualType CanonInjectedType, + ClassTemplatePartialSpecializationDecl *PrevDecl) { + auto *Result = new (Context, DC) ClassTemplatePartialSpecializationDecl( + Context, TK, DC, StartLoc, IdLoc, Params, SpecializedTemplate, Args, + PrevDecl); Result->setSpecializationKind(TSK_ExplicitSpecialization); Result->setMayHaveOutOfDateDef(false); @@ -1068,12 +1160,24 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC, ClassTemplatePartialSpecializationDecl * ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { auto *Result = new (C, ID) ClassTemplatePartialSpecializationDecl(C); Result->setMayHaveOutOfDateDef(false); return Result; } +SourceRange ClassTemplatePartialSpecializationDecl::getSourceRange() const { + if (const ClassTemplatePartialSpecializationDecl *MT = + getInstantiatedFromMember(); + MT && !isMemberSpecialization()) + return MT->getSourceRange(); + SourceRange Range = ClassTemplateSpecializationDecl::getSourceRange(); + if (const TemplateParameterList *TPL = getTemplateParameters(); + TPL && !getNumTemplateParameterLists()) + Range.setBegin(TPL->getTemplateLoc()); + return Range; +} + //===----------------------------------------------------------------------===// // FriendTemplateDecl Implementation //===----------------------------------------------------------------------===// @@ -1085,11 +1189,17 @@ FriendTemplateDecl::Create(ASTContext &Context, DeclContext *DC, SourceLocation L, MutableArrayRef<TemplateParameterList *> Params, FriendUnion Friend, SourceLocation FLoc) { - return new (Context, DC) FriendTemplateDecl(DC, L, Params, Friend, FLoc); + TemplateParameterList **TPL = nullptr; + if (!Params.empty()) { + TPL = new (Context) TemplateParameterList *[Params.size()]; + 
llvm::copy(Params, TPL); + } + return new (Context, DC) + FriendTemplateDecl(DC, L, TPL, Params.size(), Friend, FLoc); } FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) FriendTemplateDecl(EmptyShell()); } @@ -1097,18 +1207,19 @@ FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C, // TypeAliasTemplateDecl Implementation //===----------------------------------------------------------------------===// -TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C, - DeclContext *DC, - SourceLocation L, - DeclarationName Name, - TemplateParameterList *Params, - NamedDecl *Decl) { - AdoptTemplateParameterList(Params, DC); - return new (C, DC) TypeAliasTemplateDecl(C, DC, L, Name, Params, Decl); +TypeAliasTemplateDecl * +TypeAliasTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, + DeclarationName Name, + TemplateParameterList *Params, NamedDecl *Decl) { + bool Invalid = AdoptTemplateParameterList(Params, DC); + auto *TD = new (C, DC) TypeAliasTemplateDecl(C, DC, L, Name, Params, Decl); + if (Invalid) + TD->setInvalidDecl(); + return TD; } -TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { +TypeAliasTemplateDecl * +TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { return new (C, ID) TypeAliasTemplateDecl(C, nullptr, SourceLocation(), DeclarationName(), nullptr, nullptr); } @@ -1121,19 +1232,6 @@ TypeAliasTemplateDecl::newCommon(ASTContext &C) const { } //===----------------------------------------------------------------------===// -// ClassScopeFunctionSpecializationDecl Implementation -//===----------------------------------------------------------------------===// - -void ClassScopeFunctionSpecializationDecl::anchor() {} - -ClassScopeFunctionSpecializationDecl * -ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { - return new (C, ID) ClassScopeFunctionSpecializationDecl( - nullptr, SourceLocation(), nullptr, nullptr); -} - -//===----------------------------------------------------------------------===// // VarTemplateDecl Implementation //===----------------------------------------------------------------------===// @@ -1151,12 +1249,15 @@ VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name, TemplateParameterList *Params, VarDecl *Decl) { - AdoptTemplateParameterList(Params, DC); - return new (C, DC) VarTemplateDecl(C, DC, L, Name, Params, Decl); + bool Invalid = AdoptTemplateParameterList(Params, DC); + auto *TD = new (C, DC) VarTemplateDecl(C, DC, L, Name, Params, Decl); + if (Invalid) + TD->setInvalidDecl(); + return TD; } VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) VarTemplateDecl(C, nullptr, SourceLocation(), DeclarationName(), nullptr, nullptr); } @@ -1202,14 +1303,13 @@ VarTemplateDecl::findPartialSpecialization(ArrayRef<TemplateArgument> Args, TPL); } -void -VarTemplatePartialSpecializationDecl::Profile(llvm::FoldingSetNodeID &ID, - ArrayRef<TemplateArgument> TemplateArgs, TemplateParameterList *TPL, - ASTContext &Context) { +void VarTemplatePartialSpecializationDecl::Profile( + llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs, + TemplateParameterList *TPL, const ASTContext &Context) { ID.AddInteger(TemplateArgs.size()); for (const TemplateArgument &TemplateArg : TemplateArgs) 
TemplateArg.Profile(ID, Context); - ProfileTemplateParameterList(Context, ID, TPL); + TPL->Profile(ID, Context); } void VarTemplateDecl::AddPartialSpecialization( @@ -1279,7 +1379,8 @@ VarTemplateSpecializationDecl *VarTemplateSpecializationDecl::Create( } VarTemplateSpecializationDecl * -VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +VarTemplateSpecializationDecl::CreateDeserialized(ASTContext &C, + GlobalDeclID ID) { return new (C, ID) VarTemplateSpecializationDecl(VarTemplateSpecialization, C); } @@ -1309,12 +1410,72 @@ VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const { return SpecializedTemplate.get<VarTemplateDecl *>(); } -void VarTemplateSpecializationDecl::setTemplateArgsInfo( - const TemplateArgumentListInfo &ArgsInfo) { - TemplateArgsInfo.setLAngleLoc(ArgsInfo.getLAngleLoc()); - TemplateArgsInfo.setRAngleLoc(ArgsInfo.getRAngleLoc()); - for (const TemplateArgumentLoc &Loc : ArgsInfo.arguments()) - TemplateArgsInfo.addArgument(Loc); +SourceRange VarTemplateSpecializationDecl::getSourceRange() const { + switch (getSpecializationKind()) { + case TSK_Undeclared: + case TSK_ImplicitInstantiation: { + llvm::PointerUnion<VarTemplateDecl *, + VarTemplatePartialSpecializationDecl *> + Pattern = getSpecializedTemplateOrPartial(); + assert(!Pattern.isNull() && + "Variable template specialization without pattern?"); + if (const auto *VTPSD = + Pattern.dyn_cast<VarTemplatePartialSpecializationDecl *>()) + return VTPSD->getSourceRange(); + VarTemplateDecl *VTD = Pattern.get<VarTemplateDecl *>(); + if (hasInit()) { + if (VarTemplateDecl *Definition = VTD->getDefinition()) + return Definition->getSourceRange(); + } + return VTD->getCanonicalDecl()->getSourceRange(); + } + case TSK_ExplicitSpecialization: { + SourceRange Range = VarDecl::getSourceRange(); + if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten(); + !hasInit() && Args) + Range.setEnd(Args->getRAngleLoc()); + return Range; + } + case TSK_ExplicitInstantiationDeclaration: + case TSK_ExplicitInstantiationDefinition: { + SourceRange Range = VarDecl::getSourceRange(); + if (SourceLocation ExternKW = getExternKeywordLoc(); ExternKW.isValid()) + Range.setBegin(ExternKW); + else if (SourceLocation TemplateKW = getTemplateKeywordLoc(); + TemplateKW.isValid()) + Range.setBegin(TemplateKW); + if (const ASTTemplateArgumentListInfo *Args = getTemplateArgsAsWritten()) + Range.setEnd(Args->getRAngleLoc()); + return Range; + } + } + llvm_unreachable("unhandled template specialization kind"); +} + +void VarTemplateSpecializationDecl::setExternKeywordLoc(SourceLocation Loc) { + auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>(); + if (!Info) { + // Don't allocate if the location is invalid. + if (Loc.isInvalid()) + return; + Info = new (getASTContext()) ExplicitInstantiationInfo; + Info->TemplateArgsAsWritten = getTemplateArgsAsWritten(); + ExplicitInfo = Info; + } + Info->ExternKeywordLoc = Loc; +} + +void VarTemplateSpecializationDecl::setTemplateKeywordLoc(SourceLocation Loc) { + auto *Info = ExplicitInfo.dyn_cast<ExplicitInstantiationInfo *>(); + if (!Info) { + // Don't allocate if the location is invalid. 
+ if (Loc.isInvalid()) + return; + Info = new (getASTContext()) ExplicitInstantiationInfo; + Info->TemplateArgsAsWritten = getTemplateArgsAsWritten(); + ExplicitInfo = Info; + } + Info->TemplateKeywordLoc = Loc; } //===----------------------------------------------------------------------===// @@ -1327,15 +1488,13 @@ VarTemplatePartialSpecializationDecl::VarTemplatePartialSpecializationDecl( ASTContext &Context, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, TemplateParameterList *Params, VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo, - StorageClass S, ArrayRef<TemplateArgument> Args, - const ASTTemplateArgumentListInfo *ArgInfos) + StorageClass S, ArrayRef<TemplateArgument> Args) : VarTemplateSpecializationDecl(VarTemplatePartialSpecialization, Context, DC, StartLoc, IdLoc, SpecializedTemplate, T, TInfo, S, Args), - TemplateParams(Params), ArgsAsWritten(ArgInfos), - InstantiatedFromMember(nullptr, false) { - // TODO: The template parameters should be in DC by now. Verify. - // AdoptTemplateParameterList(Params, DC); + TemplateParams(Params), InstantiatedFromMember(nullptr, false) { + if (AdoptTemplateParameterList(Params, DC)) + setInvalidDecl(); } VarTemplatePartialSpecializationDecl * @@ -1343,25 +1502,32 @@ VarTemplatePartialSpecializationDecl::Create( ASTContext &Context, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, TemplateParameterList *Params, VarTemplateDecl *SpecializedTemplate, QualType T, TypeSourceInfo *TInfo, - StorageClass S, ArrayRef<TemplateArgument> Args, - const TemplateArgumentListInfo &ArgInfos) { - const ASTTemplateArgumentListInfo *ASTArgInfos - = ASTTemplateArgumentListInfo::Create(Context, ArgInfos); - - auto *Result = - new (Context, DC) VarTemplatePartialSpecializationDecl( - Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo, - S, Args, ASTArgInfos); + StorageClass S, ArrayRef<TemplateArgument> Args) { + auto *Result = new (Context, DC) VarTemplatePartialSpecializationDecl( + Context, DC, StartLoc, IdLoc, Params, SpecializedTemplate, T, TInfo, S, + Args); Result->setSpecializationKind(TSK_ExplicitSpecialization); return Result; } VarTemplatePartialSpecializationDecl * VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C, - unsigned ID) { + GlobalDeclID ID) { return new (C, ID) VarTemplatePartialSpecializationDecl(C); } +SourceRange VarTemplatePartialSpecializationDecl::getSourceRange() const { + if (const VarTemplatePartialSpecializationDecl *MT = + getInstantiatedFromMember(); + MT && !isMemberSpecialization()) + return MT->getSourceRange(); + SourceRange Range = VarTemplateSpecializationDecl::getSourceRange(); + if (const TemplateParameterList *TPL = getTemplateParameters(); + TPL && !getNumTemplateParameterLists()) + Range.setBegin(TPL->getTemplateLoc()); + return Range; +} + static TemplateParameterList * createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) { // typename T @@ -1387,7 +1553,7 @@ createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) { // template <typename T, ...Ints> class IntSeq auto *TemplateTemplateParm = TemplateTemplateParmDecl::Create( C, DC, SourceLocation(), /*Depth=*/0, /*Position=*/0, - /*ParameterPack=*/false, /*Id=*/nullptr, TPL); + /*ParameterPack=*/false, /*Id=*/nullptr, /*Typename=*/false, TPL); TemplateTemplateParm->setImplicit(true); // typename T @@ -1429,8 +1595,8 @@ createTypePackElementParameterList(const ASTContext &C, DeclContext *DC) { // template <std::size_t Index, typename ...T> 
NamedDecl *Params[] = {Index, Ts}; return TemplateParameterList::Create(C, SourceLocation(), SourceLocation(), - llvm::makeArrayRef(Params), - SourceLocation(), nullptr); + llvm::ArrayRef(Params), SourceLocation(), + nullptr); } static TemplateParameterList *createBuiltinTemplateParameterList( @@ -1454,19 +1620,6 @@ BuiltinTemplateDecl::BuiltinTemplateDecl(const ASTContext &C, DeclContext *DC, createBuiltinTemplateParameterList(C, DC, BTK)), BTK(BTK) {} -void TypeConstraint::print(llvm::raw_ostream &OS, PrintingPolicy Policy) const { - if (NestedNameSpec) - NestedNameSpec.getNestedNameSpecifier()->print(OS, Policy); - ConceptName.printName(OS, Policy); - if (hasExplicitTemplateArgs()) { - OS << "<"; - // FIXME: Find corresponding parameter for argument - for (auto &ArgLoc : ArgsAsWritten->arguments()) - ArgLoc.getArgument().print(Policy, OS, /*IncludeType*/ false); - OS << ">"; - } -} - TemplateParamObjectDecl *TemplateParamObjectDecl::Create(const ASTContext &C, QualType T, const APValue &V) { @@ -1477,25 +1630,91 @@ TemplateParamObjectDecl *TemplateParamObjectDecl::Create(const ASTContext &C, } TemplateParamObjectDecl * -TemplateParamObjectDecl::CreateDeserialized(ASTContext &C, unsigned ID) { +TemplateParamObjectDecl::CreateDeserialized(ASTContext &C, GlobalDeclID ID) { auto *TPOD = new (C, ID) TemplateParamObjectDecl(nullptr, QualType(), APValue()); C.addDestruction(&TPOD->Value); return TPOD; } -void TemplateParamObjectDecl::printName(llvm::raw_ostream &OS) const { +void TemplateParamObjectDecl::printName(llvm::raw_ostream &OS, + const PrintingPolicy &Policy) const { OS << "<template param "; - printAsExpr(OS); + printAsExpr(OS, Policy); OS << ">"; } void TemplateParamObjectDecl::printAsExpr(llvm::raw_ostream &OS) const { - const ASTContext &Ctx = getASTContext(); - getType().getUnqualifiedType().print(OS, Ctx.getPrintingPolicy()); - printAsInit(OS); + printAsExpr(OS, getASTContext().getPrintingPolicy()); +} + +void TemplateParamObjectDecl::printAsExpr(llvm::raw_ostream &OS, + const PrintingPolicy &Policy) const { + getType().getUnqualifiedType().print(OS, Policy); + printAsInit(OS, Policy); } void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS) const { - const ASTContext &Ctx = getASTContext(); - getValue().printPretty(OS, Ctx, getType()); + printAsInit(OS, getASTContext().getPrintingPolicy()); +} + +void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS, + const PrintingPolicy &Policy) const { + getValue().printPretty(OS, Policy, getType(), &getASTContext()); +} + +TemplateParameterList *clang::getReplacedTemplateParameterList(Decl *D) { + switch (D->getKind()) { + case Decl::Kind::CXXRecord: + return cast<CXXRecordDecl>(D) + ->getDescribedTemplate() + ->getTemplateParameters(); + case Decl::Kind::ClassTemplate: + return cast<ClassTemplateDecl>(D)->getTemplateParameters(); + case Decl::Kind::ClassTemplateSpecialization: { + const auto *CTSD = cast<ClassTemplateSpecializationDecl>(D); + auto P = CTSD->getSpecializedTemplateOrPartial(); + if (const auto *CTPSD = + P.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) + return CTPSD->getTemplateParameters(); + return cast<ClassTemplateDecl *>(P)->getTemplateParameters(); + } + case Decl::Kind::ClassTemplatePartialSpecialization: + return cast<ClassTemplatePartialSpecializationDecl>(D) + ->getTemplateParameters(); + case Decl::Kind::TypeAliasTemplate: + return cast<TypeAliasTemplateDecl>(D)->getTemplateParameters(); + case Decl::Kind::BuiltinTemplate: + return cast<BuiltinTemplateDecl>(D)->getTemplateParameters(); 
+ case Decl::Kind::CXXDeductionGuide: + case Decl::Kind::CXXConversion: + case Decl::Kind::CXXConstructor: + case Decl::Kind::CXXDestructor: + case Decl::Kind::CXXMethod: + case Decl::Kind::Function: + return cast<FunctionDecl>(D) + ->getTemplateSpecializationInfo() + ->getTemplate() + ->getTemplateParameters(); + case Decl::Kind::FunctionTemplate: + return cast<FunctionTemplateDecl>(D)->getTemplateParameters(); + case Decl::Kind::VarTemplate: + return cast<VarTemplateDecl>(D)->getTemplateParameters(); + case Decl::Kind::VarTemplateSpecialization: { + const auto *VTSD = cast<VarTemplateSpecializationDecl>(D); + auto P = VTSD->getSpecializedTemplateOrPartial(); + if (const auto *VTPSD = + P.dyn_cast<VarTemplatePartialSpecializationDecl *>()) + return VTPSD->getTemplateParameters(); + return cast<VarTemplateDecl *>(P)->getTemplateParameters(); + } + case Decl::Kind::VarTemplatePartialSpecialization: + return cast<VarTemplatePartialSpecializationDecl>(D) + ->getTemplateParameters(); + case Decl::Kind::TemplateTemplateParm: + return cast<TemplateTemplateParmDecl>(D)->getTemplateParameters(); + case Decl::Kind::Concept: + return cast<ConceptDecl>(D)->getTemplateParameters(); + default: + llvm_unreachable("Unhandled templated declaration kind"); + } } diff --git a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp index 56cf4b457a48..a3ac5551e0cc 100644 --- a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp +++ b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp @@ -72,15 +72,9 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) { } unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs(); for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) { - switch (LHSSelector.getNameForSlot(I).compare( - RHSSelector.getNameForSlot(I))) { - case -1: - return -1; - case 1: - return 1; - default: - break; - } + if (int Compare = LHSSelector.getNameForSlot(I).compare( + RHSSelector.getNameForSlot(I))) + return Compare; } return compareInt(LN, RN); @@ -123,12 +117,12 @@ static void printCXXConstructorDestructorName(QualType ClassType, Policy.adjustForCPlusPlus(); if (const RecordType *ClassRec = ClassType->getAs<RecordType>()) { - OS << *ClassRec->getDecl(); + ClassRec->getDecl()->printName(OS, Policy); return; } if (Policy.SuppressTemplateArgsInCXXConstructors) { if (auto *InjTy = ClassType->getAs<InjectedClassNameType>()) { - OS << *InjTy->getDecl(); + InjTy->getDecl()->printName(OS, Policy); return; } } @@ -236,7 +230,7 @@ std::string DeclarationName::getAsString() const { std::string Result; llvm::raw_string_ostream OS(Result); OS << *this; - return OS.str(); + return Result; } void *DeclarationName::getFETokenInfoSlow() const { @@ -371,7 +365,7 @@ DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind, } DeclarationName -DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) { +DeclarationNameTable::getCXXLiteralOperatorName(const IdentifierInfo *II) { llvm::FoldingSetNodeID ID; ID.AddPointer(II); @@ -460,7 +454,7 @@ std::string DeclarationNameInfo::getAsString() const { std::string Result; llvm::raw_string_ostream OS(Result); OS << *this; - return OS.str(); + return Result; } raw_ostream &clang::operator<<(raw_ostream &OS, DeclarationNameInfo DNInfo) { diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp index 11f10d4695fc..9d5b8167d0ee 100644 --- a/contrib/llvm-project/clang/lib/AST/Expr.cpp +++ 
b/contrib/llvm-project/clang/lib/AST/Expr.cpp @@ -31,11 +31,13 @@ #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Lexer.h" #include "clang/Lex/LiteralSupport.h" +#include "clang/Lex/Preprocessor.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <cstring> +#include <optional> using namespace clang; const Expr *Expr::getBestDynamicClassTypeExpr() const { @@ -84,12 +86,12 @@ const Expr *Expr::skipRValueSubobjectAdjustments( while (true) { E = E->IgnoreParens(); - if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { + if (const auto *CE = dyn_cast<CastExpr>(E)) { if ((CE->getCastKind() == CK_DerivedToBase || CE->getCastKind() == CK_UncheckedDerivedToBase) && E->getType()->isRecordType()) { E = CE->getSubExpr(); - auto *Derived = + const auto *Derived = cast<CXXRecordDecl>(E->getType()->castAs<RecordType>()->getDecl()); Adjustments.push_back(SubobjectAdjustment(CE, Derived)); continue; @@ -99,10 +101,10 @@ const Expr *Expr::skipRValueSubobjectAdjustments( E = CE->getSubExpr(); continue; } - } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { + } else if (const auto *ME = dyn_cast<MemberExpr>(E)) { if (!ME->isArrow()) { - assert(ME->getBase()->getType()->isRecordType()); - if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) { + assert(ME->getBase()->getType()->getAsRecordDecl()); + if (const auto *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) { if (!Field->isBitField() && !Field->getType()->isReferenceType()) { E = ME->getBase(); Adjustments.push_back(SubobjectAdjustment(Field)); @@ -110,12 +112,11 @@ const Expr *Expr::skipRValueSubobjectAdjustments( } } } - } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { + } else if (const auto *BO = dyn_cast<BinaryOperator>(E)) { if (BO->getOpcode() == BO_PtrMemD) { assert(BO->getRHS()->isPRValue()); E = BO->getLHS(); - const MemberPointerType *MPT = - BO->getRHS()->getType()->getAs<MemberPointerType>(); + const auto *MPT = BO->getRHS()->getType()->getAs<MemberPointerType>(); Adjustments.push_back(SubobjectAdjustment(MPT, BO->getRHS())); continue; } @@ -202,6 +203,42 @@ bool Expr::isKnownToHaveBooleanValue(bool Semantic) const { return false; } +bool Expr::isFlexibleArrayMemberLike( + ASTContext &Ctx, + LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, + bool IgnoreTemplateOrMacroSubstitution) const { + const Expr *E = IgnoreParens(); + const Decl *D = nullptr; + + if (const auto *ME = dyn_cast<MemberExpr>(E)) + D = ME->getMemberDecl(); + else if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) + D = DRE->getDecl(); + else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) + D = IRE->getDecl(); + + return Decl::isFlexibleArrayMemberLike(Ctx, D, E->getType(), + StrictFlexArraysLevel, + IgnoreTemplateOrMacroSubstitution); +} + +const ValueDecl * +Expr::getAsBuiltinConstantDeclRef(const ASTContext &Context) const { + Expr::EvalResult Eval; + + if (EvaluateAsConstantExpr(Eval, Context)) { + APValue &Value = Eval.Val; + + if (Value.isMemberPointer()) + return Value.getMemberPointerDecl(); + + if (Value.isLValue() && Value.getLValueOffset().isZero()) + return Value.getLValueBase().dyn_cast<const ValueDecl *>(); + } + + return nullptr; +} + // Amusing macro metaprogramming hack: check whether a class provides // a more specific implementation of getExprLoc(). 
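
[Illustrative sketch, not part of the diff] The hunk above introduces two new Expr helpers, isFlexibleArrayMemberLike() and getAsBuiltinConstantDeclRef(). A minimal usage sketch follows; only the signatures shown in this hunk are taken from the commit, the wrapper names and includes are assumptions:

  #include "clang/AST/ASTContext.h"
  #include "clang/AST/Expr.h"
  #include "clang/Basic/LangOptions.h"

  // Hypothetical wrapper: true when 'Base' designates a trailing
  // flexible-array-like member under the given -fstrict-flex-arrays level.
  static bool isTrailingArrayAccess(
      const clang::Expr *Base, clang::ASTContext &Ctx,
      clang::LangOptions::StrictFlexArraysLevelKind Level) {
    return Base->isFlexibleArrayMemberLike(
        Ctx, Level, /*IgnoreTemplateOrMacroSubstitution=*/false);
  }

  // Hypothetical wrapper: resolve an argument that must constant-evaluate
  // to a reference to a declaration (e.g. a function passed to a builtin).
  static const clang::ValueDecl *
  resolveConstantDeclArg(const clang::Expr *Arg, const clang::ASTContext &Ctx) {
    return Arg->getAsBuiltinConstantDeclRef(Ctx);
  }
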
// @@ -226,6 +263,17 @@ namespace { } } +QualType Expr::getEnumCoercedType(const ASTContext &Ctx) const { + if (isa<EnumType>(getType())) + return getType(); + if (const auto *ECD = getEnumConstantDecl()) { + const auto *ED = cast<EnumDecl>(ECD->getDeclContext()); + if (ED->isCompleteDefinition()) + return Ctx.getTypeDeclType(ED); + } + return getType(); +} + SourceLocation Expr::getExprLoc() const { switch (getStmtClass()) { case Stmt::NoStmtClass: llvm_unreachable("statement without class"); @@ -243,85 +291,86 @@ SourceLocation Expr::getExprLoc() const { // Primary Expressions. //===----------------------------------------------------------------------===// -static void AssertResultStorageKind(ConstantExpr::ResultStorageKind Kind) { - assert((Kind == ConstantExpr::RSK_APValue || - Kind == ConstantExpr::RSK_Int64 || Kind == ConstantExpr::RSK_None) && +static void AssertResultStorageKind(ConstantResultStorageKind Kind) { + assert((Kind == ConstantResultStorageKind::APValue || + Kind == ConstantResultStorageKind::Int64 || + Kind == ConstantResultStorageKind::None) && "Invalid StorageKind Value"); (void)Kind; } -ConstantExpr::ResultStorageKind -ConstantExpr::getStorageKind(const APValue &Value) { +ConstantResultStorageKind ConstantExpr::getStorageKind(const APValue &Value) { switch (Value.getKind()) { case APValue::None: case APValue::Indeterminate: - return ConstantExpr::RSK_None; + return ConstantResultStorageKind::None; case APValue::Int: if (!Value.getInt().needsCleanup()) - return ConstantExpr::RSK_Int64; - LLVM_FALLTHROUGH; + return ConstantResultStorageKind::Int64; + [[fallthrough]]; default: - return ConstantExpr::RSK_APValue; + return ConstantResultStorageKind::APValue; } } -ConstantExpr::ResultStorageKind +ConstantResultStorageKind ConstantExpr::getStorageKind(const Type *T, const ASTContext &Context) { if (T->isIntegralOrEnumerationType() && Context.getTypeInfo(T).Width <= 64) - return ConstantExpr::RSK_Int64; - return ConstantExpr::RSK_APValue; + return ConstantResultStorageKind::Int64; + return ConstantResultStorageKind::APValue; } -ConstantExpr::ConstantExpr(Expr *SubExpr, ResultStorageKind StorageKind, +ConstantExpr::ConstantExpr(Expr *SubExpr, ConstantResultStorageKind StorageKind, bool IsImmediateInvocation) : FullExpr(ConstantExprClass, SubExpr) { - ConstantExprBits.ResultKind = StorageKind; + ConstantExprBits.ResultKind = llvm::to_underlying(StorageKind); ConstantExprBits.APValueKind = APValue::None; ConstantExprBits.IsUnsigned = false; ConstantExprBits.BitWidth = 0; ConstantExprBits.HasCleanup = false; ConstantExprBits.IsImmediateInvocation = IsImmediateInvocation; - if (StorageKind == ConstantExpr::RSK_APValue) + if (StorageKind == ConstantResultStorageKind::APValue) ::new (getTrailingObjects<APValue>()) APValue(); } ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E, - ResultStorageKind StorageKind, + ConstantResultStorageKind StorageKind, bool IsImmediateInvocation) { assert(!isa<ConstantExpr>(E)); AssertResultStorageKind(StorageKind); unsigned Size = totalSizeToAlloc<APValue, uint64_t>( - StorageKind == ConstantExpr::RSK_APValue, - StorageKind == ConstantExpr::RSK_Int64); + StorageKind == ConstantResultStorageKind::APValue, + StorageKind == ConstantResultStorageKind::Int64); void *Mem = Context.Allocate(Size, alignof(ConstantExpr)); return new (Mem) ConstantExpr(E, StorageKind, IsImmediateInvocation); } ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E, const APValue &Result) { - ResultStorageKind StorageKind = 
getStorageKind(Result); + ConstantResultStorageKind StorageKind = getStorageKind(Result); ConstantExpr *Self = Create(Context, E, StorageKind); Self->SetResult(Result, Context); return Self; } -ConstantExpr::ConstantExpr(EmptyShell Empty, ResultStorageKind StorageKind) +ConstantExpr::ConstantExpr(EmptyShell Empty, + ConstantResultStorageKind StorageKind) : FullExpr(ConstantExprClass, Empty) { - ConstantExprBits.ResultKind = StorageKind; + ConstantExprBits.ResultKind = llvm::to_underlying(StorageKind); - if (StorageKind == ConstantExpr::RSK_APValue) + if (StorageKind == ConstantResultStorageKind::APValue) ::new (getTrailingObjects<APValue>()) APValue(); } ConstantExpr *ConstantExpr::CreateEmpty(const ASTContext &Context, - ResultStorageKind StorageKind) { + ConstantResultStorageKind StorageKind) { AssertResultStorageKind(StorageKind); unsigned Size = totalSizeToAlloc<APValue, uint64_t>( - StorageKind == ConstantExpr::RSK_APValue, - StorageKind == ConstantExpr::RSK_Int64); + StorageKind == ConstantResultStorageKind::APValue, + StorageKind == ConstantResultStorageKind::Int64); void *Mem = Context.Allocate(Size, alignof(ConstantExpr)); return new (Mem) ConstantExpr(EmptyShell(), StorageKind); } @@ -330,15 +379,15 @@ void ConstantExpr::MoveIntoResult(APValue &Value, const ASTContext &Context) { assert((unsigned)getStorageKind(Value) <= ConstantExprBits.ResultKind && "Invalid storage for this value kind"); ConstantExprBits.APValueKind = Value.getKind(); - switch (ConstantExprBits.ResultKind) { - case RSK_None: + switch (getResultStorageKind()) { + case ConstantResultStorageKind::None: return; - case RSK_Int64: + case ConstantResultStorageKind::Int64: Int64Result() = *Value.getInt().getRawData(); ConstantExprBits.BitWidth = Value.getInt().getBitWidth(); ConstantExprBits.IsUnsigned = Value.getInt().isUnsigned(); return; - case RSK_APValue: + case ConstantResultStorageKind::APValue: if (!ConstantExprBits.HasCleanup && Value.needsCleanup()) { ConstantExprBits.HasCleanup = true; Context.addDestruction(&APValueResult()); @@ -350,10 +399,10 @@ void ConstantExpr::MoveIntoResult(APValue &Value, const ASTContext &Context) { } llvm::APSInt ConstantExpr::getResultAsAPSInt() const { - switch (ConstantExprBits.ResultKind) { - case ConstantExpr::RSK_APValue: + switch (getResultStorageKind()) { + case ConstantResultStorageKind::APValue: return APValueResult().getInt(); - case ConstantExpr::RSK_Int64: + case ConstantResultStorageKind::Int64: return llvm::APSInt(llvm::APInt(ConstantExprBits.BitWidth, Int64Result()), ConstantExprBits.IsUnsigned); default: @@ -363,14 +412,14 @@ llvm::APSInt ConstantExpr::getResultAsAPSInt() const { APValue ConstantExpr::getAPValueResult() const { - switch (ConstantExprBits.ResultKind) { - case ConstantExpr::RSK_APValue: + switch (getResultStorageKind()) { + case ConstantResultStorageKind::APValue: return APValueResult(); - case ConstantExpr::RSK_Int64: + case ConstantResultStorageKind::Int64: return APValue( llvm::APSInt(llvm::APInt(ConstantExprBits.BitWidth, Int64Result()), ConstantExprBits.IsUnsigned)); - case ConstantExpr::RSK_None: + case ConstantResultStorageKind::None: if (ConstantExprBits.APValueKind == APValue::Indeterminate) return APValue::IndeterminateValue(); return APValue(); @@ -390,7 +439,9 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D, DeclRefExprBits.HadMultipleCandidates = false; DeclRefExprBits.RefersToEnclosingVariableOrCapture = RefersToEnclosingVariableOrCapture; + DeclRefExprBits.CapturedByCopyInLambdaWithExplicitObjectParameter = false; 
DeclRefExprBits.NonOdrUseReason = NOUR; + DeclRefExprBits.IsImmediateEscalating = false; DeclRefExprBits.Loc = L; setDependence(computeDependence(this, Ctx)); } @@ -416,6 +467,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, = (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0; DeclRefExprBits.RefersToEnclosingVariableOrCapture = RefersToEnclosingVariableOrCapture; + DeclRefExprBits.CapturedByCopyInLambdaWithExplicitObjectParameter = false; DeclRefExprBits.NonOdrUseReason = NOUR; if (TemplateArgs) { auto Deps = TemplateArgumentDependence::None; @@ -428,6 +480,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom( TemplateKWLoc); } + DeclRefExprBits.IsImmediateEscalating = false; DeclRefExprBits.HadMultipleCandidates = 0; setDependence(computeDependence(this, Ctx)); } @@ -544,40 +597,33 @@ std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context) const { std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context, QualType Ty) { auto MangleCallback = [](ASTContext &Ctx, - const NamedDecl *ND) -> llvm::Optional<unsigned> { - // This replaces the 'lambda number' in the mangling with a unique number - // based on its order in the declaration. To provide some level of visual - // notability (actual uniqueness from normal lambdas isn't necessary, as - // these are used differently), we add 10,000 to the number. - // For example: - // _ZTSZ3foovEUlvE10005_ - // Demangles to: typeinfo name for foo()::'lambda10005'() - // Note that the mangler subtracts 2, since with normal lambdas the lambda - // mangling number '0' is an anonymous struct mangle, and '1' is omitted. - // So 10,002 results in the first number being 10,000. - if (Ctx.IsSYCLKernelNamingDecl(ND)) - return 10'002 + Ctx.GetSYCLKernelNamingIndex(ND); - return llvm::None; + const NamedDecl *ND) -> std::optional<unsigned> { + if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) + return RD->getDeviceLambdaManglingNumber(); + return std::nullopt; }; + std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create( Context, Context.getDiagnostics(), MangleCallback)}; std::string Buffer; Buffer.reserve(128); llvm::raw_string_ostream Out(Buffer); - Ctx->mangleTypeName(Ty, Out); + Ctx->mangleCanonicalTypeName(Ty, Out); return Out.str(); } -PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK, +PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, + PredefinedIdentKind IK, bool IsTransparent, StringLiteral *SL) : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary) { - PredefinedExprBits.Kind = IK; + PredefinedExprBits.Kind = llvm::to_underlying(IK); assert((getIdentKind() == IK) && "IdentKind do not fit in PredefinedExprBitfields!"); bool HasFunctionName = SL != nullptr; PredefinedExprBits.HasFunctionName = HasFunctionName; + PredefinedExprBits.IsTransparent = IsTransparent; PredefinedExprBits.Loc = L; if (HasFunctionName) setFunctionName(SL); @@ -590,12 +636,12 @@ PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName) } PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L, - QualType FNTy, IdentKind IK, - StringLiteral *SL) { + QualType FNTy, PredefinedIdentKind IK, + bool IsTransparent, StringLiteral *SL) { bool HasFunctionName = SL != nullptr; void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName), alignof(PredefinedExpr)); - return new (Mem) PredefinedExpr(L, FNTy, IK, SL); + return new (Mem) PredefinedExpr(L, FNTy, IK, IsTransparent, SL); } PredefinedExpr 
*PredefinedExpr::CreateEmpty(const ASTContext &Ctx, @@ -605,23 +651,23 @@ PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx, return new (Mem) PredefinedExpr(EmptyShell(), HasFunctionName); } -StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) { +StringRef PredefinedExpr::getIdentKindName(PredefinedIdentKind IK) { switch (IK) { - case Func: + case PredefinedIdentKind::Func: return "__func__"; - case Function: + case PredefinedIdentKind::Function: return "__FUNCTION__"; - case FuncDName: + case PredefinedIdentKind::FuncDName: return "__FUNCDNAME__"; - case LFunction: + case PredefinedIdentKind::LFunction: return "L__FUNCTION__"; - case PrettyFunction: + case PredefinedIdentKind::PrettyFunction: return "__PRETTY_FUNCTION__"; - case FuncSig: + case PredefinedIdentKind::FuncSig: return "__FUNCSIG__"; - case LFuncSig: + case PredefinedIdentKind::LFuncSig: return "L__FUNCSIG__"; - case PrettyFunctionNoVirtual: + case PredefinedIdentKind::PrettyFunctionNoVirtual: break; } llvm_unreachable("Unknown ident kind for PredefinedExpr"); @@ -629,10 +675,12 @@ StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) { // FIXME: Maybe this should use DeclPrinter with a special "print predefined // expr" policy instead. -std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { +std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK, + const Decl *CurrentDecl, + bool ForceElaboratedPrinting) { ASTContext &Context = CurrentDecl->getASTContext(); - if (IK == PredefinedExpr::FuncDName) { + if (IK == PredefinedIdentKind::FuncDName) { if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) { std::unique_ptr<MangleContext> MC; MC.reset(Context.createMangleContext()); @@ -653,7 +701,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { if (!Buffer.empty() && Buffer.front() == '\01') return std::string(Buffer.substr(1)); - return std::string(Buffer.str()); + return std::string(Buffer); } return std::string(ND->getIdentifier()->getName()); } @@ -677,21 +725,50 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { return std::string(Out.str()); } if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) { - if (IK != PrettyFunction && IK != PrettyFunctionNoVirtual && - IK != FuncSig && IK != LFuncSig) + const auto &LO = Context.getLangOpts(); + bool IsFuncOrFunctionInNonMSVCCompatEnv = + ((IK == PredefinedIdentKind::Func || + IK == PredefinedIdentKind ::Function) && + !LO.MSVCCompat); + bool IsLFunctionInMSVCCommpatEnv = + IK == PredefinedIdentKind::LFunction && LO.MSVCCompat; + bool IsFuncOrFunctionOrLFunctionOrFuncDName = + IK != PredefinedIdentKind::PrettyFunction && + IK != PredefinedIdentKind::PrettyFunctionNoVirtual && + IK != PredefinedIdentKind::FuncSig && + IK != PredefinedIdentKind::LFuncSig; + if ((ForceElaboratedPrinting && + (IsFuncOrFunctionInNonMSVCCompatEnv || IsLFunctionInMSVCCommpatEnv)) || + (!ForceElaboratedPrinting && IsFuncOrFunctionOrLFunctionOrFuncDName)) return FD->getNameAsString(); SmallString<256> Name; llvm::raw_svector_ostream Out(Name); if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { - if (MD->isVirtual() && IK != PrettyFunctionNoVirtual) + if (MD->isVirtual() && IK != PredefinedIdentKind::PrettyFunctionNoVirtual) Out << "virtual "; if (MD->isStatic()) Out << "static "; } + class PrettyCallbacks final : public PrintingCallbacks { + public: + PrettyCallbacks(const LangOptions &LO) : LO(LO) {} + std::string 
remapPath(StringRef Path) const override { + SmallString<128> p(Path); + LO.remapPathPrefix(p); + return std::string(p); + } + + private: + const LangOptions &LO; + }; PrintingPolicy Policy(Context.getLangOpts()); + PrettyCallbacks PrettyCB(Context.getLangOpts()); + Policy.Callbacks = &PrettyCB; + if (IK == PredefinedIdentKind::Function && ForceElaboratedPrinting) + Policy.SuppressTagKeyword = !LO.MSVCCompat; std::string Proto; llvm::raw_string_ostream POut(Proto); @@ -703,7 +780,8 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { if (FD->hasWrittenPrototype()) FT = dyn_cast<FunctionProtoType>(AFT); - if (IK == FuncSig || IK == LFuncSig) { + if (IK == PredefinedIdentKind::FuncSig || + IK == PredefinedIdentKind::LFuncSig) { switch (AFT->getCallConv()) { case CC_C: POut << "__cdecl "; break; case CC_X86StdCall: POut << "__stdcall "; break; @@ -718,6 +796,12 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { FD->printQualifiedName(POut, Policy); + if (IK == PredefinedIdentKind::Function) { + POut.flush(); + Out << Proto; + return std::string(Name); + } + POut << "("; if (FT) { for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) { @@ -728,7 +812,8 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { if (FT->isVariadic()) { if (FD->getNumParams()) POut << ", "; POut << "..."; - } else if ((IK == FuncSig || IK == LFuncSig || + } else if ((IK == PredefinedIdentKind::FuncSig || + IK == PredefinedIdentKind::LFuncSig || !Context.getLangOpts().CPlusPlus) && !Decl->getNumParams()) { POut << "void"; @@ -752,7 +837,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy; SpecsTy Specs; const DeclContext *Ctx = FD->getDeclContext(); - while (Ctx && isa<NamedDecl>(Ctx)) { + while (isa_and_nonnull<NamedDecl>(Ctx)) { const ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Ctx); if (Spec && !Spec->isExplicitSpecialization()) @@ -762,19 +847,18 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { std::string TemplateParams; llvm::raw_string_ostream TOut(TemplateParams); - for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend(); - I != E; ++I) { - const TemplateParameterList *Params - = (*I)->getSpecializedTemplate()->getTemplateParameters(); - const TemplateArgumentList &Args = (*I)->getTemplateArgs(); + for (const ClassTemplateSpecializationDecl *D : llvm::reverse(Specs)) { + const TemplateParameterList *Params = + D->getSpecializedTemplate()->getTemplateParameters(); + const TemplateArgumentList &Args = D->getTemplateArgs(); assert(Params->size() == Args.size()); for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) { StringRef Param = Params->getParam(i)->getName(); if (Param.empty()) continue; TOut << Param << " = "; - Args.get(i).print( - Policy, TOut, - TemplateParameterList::shouldIncludeTypeForArgument(Params, i)); + Args.get(i).print(Policy, TOut, + TemplateParameterList::shouldIncludeTypeForArgument( + Policy, Params, i)); TOut << ", "; } } @@ -854,7 +938,8 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) { return std::string(Name); } - if (isa<TranslationUnitDecl>(CurrentDecl) && IK == PrettyFunction) { + if (isa<TranslationUnitDecl>(CurrentDecl) && + IK == PredefinedIdentKind::PrettyFunction) { // __PRETTY_FUNCTION__ -> "top level", the others produce an empty string. 
return "top level"; } @@ -931,67 +1016,37 @@ std::string FixedPointLiteral::getValueAsString(unsigned Radix) const { SmallString<64> S; FixedPointValueToString( S, llvm::APSInt::getUnsigned(getValue().getZExtValue()), Scale); - return std::string(S.str()); + return std::string(S); } -void CharacterLiteral::print(unsigned Val, CharacterKind Kind, +void CharacterLiteral::print(unsigned Val, CharacterLiteralKind Kind, raw_ostream &OS) { switch (Kind) { - case CharacterLiteral::Ascii: + case CharacterLiteralKind::Ascii: break; // no prefix. - case CharacterLiteral::Wide: + case CharacterLiteralKind::Wide: OS << 'L'; break; - case CharacterLiteral::UTF8: + case CharacterLiteralKind::UTF8: OS << "u8"; break; - case CharacterLiteral::UTF16: + case CharacterLiteralKind::UTF16: OS << 'u'; break; - case CharacterLiteral::UTF32: + case CharacterLiteralKind::UTF32: OS << 'U'; break; } - switch (Val) { - case '\\': - OS << "'\\\\'"; - break; - case '\'': - OS << "'\\''"; - break; - case '\a': - // TODO: K&R: the meaning of '\\a' is different in traditional C - OS << "'\\a'"; - break; - case '\b': - OS << "'\\b'"; - break; - // Nonstandard escape sequence. - /*case '\e': - OS << "'\\e'"; - break;*/ - case '\f': - OS << "'\\f'"; - break; - case '\n': - OS << "'\\n'"; - break; - case '\r': - OS << "'\\r'"; - break; - case '\t': - OS << "'\\t'"; - break; - case '\v': - OS << "'\\v'"; - break; - default: + StringRef Escaped = escapeCStyle<EscapeChar::Single>(Val); + if (!Escaped.empty()) { + OS << "'" << Escaped << "'"; + } else { // A character literal might be sign-extended, which // would result in an invalid \U escape sequence. // FIXME: multicharacter literals such as '\xFF\xFF\xFF\xFF' // are not correctly handled. - if ((Val & ~0xFFu) == ~0xFFu && Kind == CharacterLiteral::Ascii) + if ((Val & ~0xFFu) == ~0xFFu && Kind == CharacterLiteralKind::Ascii) Val &= 0xFFu; if (Val < 256 && isPrintable((unsigned char)Val)) OS << "'" << (char)Val << "'"; @@ -1042,22 +1097,24 @@ double FloatingLiteral::getValueAsApproximateDouble() const { } unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target, - StringKind SK) { + StringLiteralKind SK) { unsigned CharByteWidth = 0; switch (SK) { - case Ascii: - case UTF8: + case StringLiteralKind::Ordinary: + case StringLiteralKind::UTF8: CharByteWidth = Target.getCharWidth(); break; - case Wide: + case StringLiteralKind::Wide: CharByteWidth = Target.getWCharWidth(); break; - case UTF16: + case StringLiteralKind::UTF16: CharByteWidth = Target.getChar16Width(); break; - case UTF32: + case StringLiteralKind::UTF32: CharByteWidth = Target.getChar32Width(); break; + case StringLiteralKind::Unevaluated: + return sizeof(char); // Host; } assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple"); CharByteWidth /= 8; @@ -1067,39 +1124,49 @@ unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target, } StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str, - StringKind Kind, bool Pascal, QualType Ty, + StringLiteralKind Kind, bool Pascal, QualType Ty, const SourceLocation *Loc, unsigned NumConcatenated) : Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary) { - assert(Ctx.getAsConstantArrayType(Ty) && - "StringLiteral must be of constant array type!"); - unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind); - unsigned ByteLength = Str.size(); - assert((ByteLength % CharByteWidth == 0) && - "The size of the data must be a multiple of CharByteWidth!"); - - // Avoid the expensive division. 
The compiler should be able to figure it - // out by itself. However as of clang 7, even with the appropriate - // llvm_unreachable added just here, it is not able to do so. - unsigned Length; - switch (CharByteWidth) { - case 1: - Length = ByteLength; - break; - case 2: - Length = ByteLength / 2; - break; - case 4: - Length = ByteLength / 4; - break; - default: - llvm_unreachable("Unsupported character width!"); - } - StringLiteralBits.Kind = Kind; - StringLiteralBits.CharByteWidth = CharByteWidth; - StringLiteralBits.IsPascal = Pascal; + unsigned Length = Str.size(); + + StringLiteralBits.Kind = llvm::to_underlying(Kind); StringLiteralBits.NumConcatenated = NumConcatenated; + + if (Kind != StringLiteralKind::Unevaluated) { + assert(Ctx.getAsConstantArrayType(Ty) && + "StringLiteral must be of constant array type!"); + unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind); + unsigned ByteLength = Str.size(); + assert((ByteLength % CharByteWidth == 0) && + "The size of the data must be a multiple of CharByteWidth!"); + + // Avoid the expensive division. The compiler should be able to figure it + // out by itself. However as of clang 7, even with the appropriate + // llvm_unreachable added just here, it is not able to do so. + switch (CharByteWidth) { + case 1: + Length = ByteLength; + break; + case 2: + Length = ByteLength / 2; + break; + case 4: + Length = ByteLength / 4; + break; + default: + llvm_unreachable("Unsupported character width!"); + } + + StringLiteralBits.CharByteWidth = CharByteWidth; + StringLiteralBits.IsPascal = Pascal; + } else { + assert(!Pascal && "Can't make an unevaluated Pascal string"); + StringLiteralBits.CharByteWidth = 1; + StringLiteralBits.IsPascal = false; + } + *getTrailingObjects<unsigned>() = Length; // Initialize the trailing array of SourceLocation. @@ -1108,7 +1175,7 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str, NumConcatenated * sizeof(SourceLocation)); // Initialize the trailing array of char holding the string data. - std::memcpy(getTrailingObjects<char>(), Str.data(), ByteLength); + std::memcpy(getTrailingObjects<char>(), Str.data(), Str.size()); setDependence(ExprDependence::None); } @@ -1122,8 +1189,8 @@ StringLiteral::StringLiteral(EmptyShell Empty, unsigned NumConcatenated, } StringLiteral *StringLiteral::Create(const ASTContext &Ctx, StringRef Str, - StringKind Kind, bool Pascal, QualType Ty, - const SourceLocation *Loc, + StringLiteralKind Kind, bool Pascal, + QualType Ty, const SourceLocation *Loc, unsigned NumConcatenated) { void *Mem = Ctx.Allocate(totalSizeToAlloc<unsigned, SourceLocation, char>( 1, NumConcatenated, Str.size()), @@ -1145,25 +1212,36 @@ StringLiteral *StringLiteral::CreateEmpty(const ASTContext &Ctx, void StringLiteral::outputString(raw_ostream &OS) const { switch (getKind()) { - case Ascii: break; // no prefix. - case Wide: OS << 'L'; break; - case UTF8: OS << "u8"; break; - case UTF16: OS << 'u'; break; - case UTF32: OS << 'U'; break; + case StringLiteralKind::Unevaluated: + case StringLiteralKind::Ordinary: + break; // no prefix. 
+ case StringLiteralKind::Wide: + OS << 'L'; + break; + case StringLiteralKind::UTF8: + OS << "u8"; + break; + case StringLiteralKind::UTF16: + OS << 'u'; + break; + case StringLiteralKind::UTF32: + OS << 'U'; + break; } OS << '"'; static const char Hex[] = "0123456789ABCDEF"; unsigned LastSlashX = getLength(); for (unsigned I = 0, N = getLength(); I != N; ++I) { - switch (uint32_t Char = getCodeUnit(I)) { - default: + uint32_t Char = getCodeUnit(I); + StringRef Escaped = escapeCStyle<EscapeChar::Double>(Char); + if (Escaped.empty()) { // FIXME: Convert UTF-8 back to codepoints before rendering. // Convert UTF-16 surrogate pairs back to codepoints before rendering. // Leave invalid surrogates alone; we'll use \x for those. - if (getKind() == UTF16 && I != N - 1 && Char >= 0xd800 && - Char <= 0xdbff) { + if (getKind() == StringLiteralKind::UTF16 && I != N - 1 && + Char >= 0xd800 && Char <= 0xdbff) { uint32_t Trail = getCodeUnit(I + 1); if (Trail >= 0xdc00 && Trail <= 0xdfff) { Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00); @@ -1175,7 +1253,7 @@ void StringLiteral::outputString(raw_ostream &OS) const { // If this is a wide string, output characters over 0xff using \x // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a // codepoint: use \x escapes for invalid codepoints. - if (getKind() == Wide || + if (getKind() == StringLiteralKind::Wide || (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) { // FIXME: Is this the best way to print wchar_t? OS << "\\x"; @@ -1185,7 +1263,7 @@ void StringLiteral::outputString(raw_ostream &OS) const { for (/**/; Shift >= 0; Shift -= 4) OS << Hex[(Char >> Shift) & 15]; LastSlashX = I; - break; + continue; } if (Char > 0xffff) @@ -1198,7 +1276,7 @@ void StringLiteral::outputString(raw_ostream &OS) const { << Hex[(Char >> 8) & 15] << Hex[(Char >> 4) & 15] << Hex[(Char >> 0) & 15]; - break; + continue; } // If we used \x... for the previous character, and this character is a @@ -1223,17 +1301,9 @@ void StringLiteral::outputString(raw_ostream &OS) const { << (char)('0' + ((Char >> 6) & 7)) << (char)('0' + ((Char >> 3) & 7)) << (char)('0' + ((Char >> 0) & 7)); - break; - // Handle some common non-printable cases to make dumps prettier. - case '\\': OS << "\\\\"; break; - case '"': OS << "\\\""; break; - case '\a': OS << "\\a"; break; - case '\b': OS << "\\b"; break; - case '\f': OS << "\\f"; break; - case '\n': OS << "\\n"; break; - case '\r': OS << "\\r"; break; - case '\t': OS << "\\t"; break; - case '\v': OS << "\\v"; break; + } else { + // Handle some common non-printable cases to make dumps prettier. 
+ OS << Escaped; } } OS << '"'; @@ -1260,8 +1330,9 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, const TargetInfo &Target, unsigned *StartToken, unsigned *StartTokenByteOffset) const { - assert((getKind() == StringLiteral::Ascii || - getKind() == StringLiteral::UTF8) && + assert((getKind() == StringLiteralKind::Ordinary || + getKind() == StringLiteralKind::UTF8 || + getKind() == StringLiteralKind::Unevaluated) && "Only narrow string literals are currently supported"); // Loop over all of the tokens in this string until we find the one that @@ -1274,7 +1345,7 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM, StringOffset = *StartTokenByteOffset; ByteNo -= StringOffset; } - while (1) { + while (true) { assert(TokNo < getNumConcatenated() && "Invalid byte number!"); SourceLocation StrTokLoc = getStrTokenLoc(TokNo); @@ -1474,19 +1545,17 @@ unsigned CallExpr::offsetToTrailingObjects(StmtClass SC) { Decl *Expr::getReferencedDeclOfCallee() { Expr *CEE = IgnoreParenImpCasts(); - while (SubstNonTypeTemplateParmExpr *NTTP = - dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) { + while (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) CEE = NTTP->getReplacement()->IgnoreParenImpCasts(); - } // If we're calling a dereference, look at the pointer instead. while (true) { - if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) { + if (auto *BO = dyn_cast<BinaryOperator>(CEE)) { if (BO->isPtrMemOp()) { CEE = BO->getRHS()->IgnoreParenImpCasts(); continue; } - } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) { + } else if (auto *UO = dyn_cast<UnaryOperator>(CEE)) { if (UO->getOpcode() == UO_Deref || UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Plus) { CEE = UO->getSubExpr()->IgnoreParenImpCasts(); @@ -1496,9 +1565,9 @@ Decl *Expr::getReferencedDeclOfCallee() { break; } - if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) + if (auto *DRE = dyn_cast<DeclRefExpr>(CEE)) return DRE->getDecl(); - if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE)) + if (auto *ME = dyn_cast<MemberExpr>(CEE)) return ME->getMemberDecl(); if (auto *BE = dyn_cast<BlockExpr>(CEE)) return BE->getBlockDecl(); @@ -1508,8 +1577,7 @@ Decl *Expr::getReferencedDeclOfCallee() { /// If this is a call to a builtin, return the builtin ID. If not, return 0. unsigned CallExpr::getBuiltinCallee() const { - auto *FDecl = - dyn_cast_or_null<FunctionDecl>(getCallee()->getReferencedDeclOfCallee()); + const auto *FDecl = getDirectCallee(); return FDecl ? FDecl->getBuiltinID() : 0; } @@ -1536,6 +1604,10 @@ QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const { // This should never be overloaded and so should never return null. CalleeType = Expr::findBoundMemberType(Callee); assert(!CalleeType.isNull()); + } else if (CalleeType->isRecordType()) { + // If the Callee is a record type, then it is a not-yet-resolved + // dependent call to the call operator of that type. 
+ return Ctx.DependentTy; } else if (CalleeType->isDependentType() || CalleeType->isSpecificPlaceholderType(BuiltinType::Overload)) { return Ctx.DependentTy; @@ -1552,6 +1624,11 @@ const Attr *CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const { if (const auto *A = TD->getAttr<WarnUnusedResultAttr>()) return A; + for (const auto *TD = getCallReturnType(Ctx)->getAs<TypedefType>(); TD; + TD = TD->desugar()->getAs<TypedefType>()) + if (const auto *A = TD->getDecl()->getAttr<WarnUnusedResultAttr>()) + return A; + // Otherwise, see if the callee is marked nodiscard and return that attribute // instead. const Decl *D = getCalleeDecl(); @@ -1559,8 +1636,8 @@ const Attr *CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const { } SourceLocation CallExpr::getBeginLoc() const { - if (isa<CXXOperatorCallExpr>(this)) - return cast<CXXOperatorCallExpr>(this)->getBeginLoc(); + if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this)) + return OCE->getBeginLoc(); SourceLocation begin = getCallee()->getBeginLoc(); if (begin.isInvalid() && getNumArgs() > 0 && getArg(0)) @@ -1568,8 +1645,8 @@ SourceLocation CallExpr::getBeginLoc() const { return begin; } SourceLocation CallExpr::getEndLoc() const { - if (isa<CXXOperatorCallExpr>(this)) - return cast<CXXOperatorCallExpr>(this)->getEndLoc(); + if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this)) + return OCE->getEndLoc(); SourceLocation end = getRParenLoc(); if (end.isInvalid() && getNumArgs() > 0 && getArg(getNumArgs() - 1)) @@ -1635,8 +1712,11 @@ UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr( } MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc, - ValueDecl *MemberDecl, - const DeclarationNameInfo &NameInfo, QualType T, + NestedNameSpecifierLoc QualifierLoc, + SourceLocation TemplateKWLoc, ValueDecl *MemberDecl, + DeclAccessPair FoundDecl, + const DeclarationNameInfo &NameInfo, + const TemplateArgumentListInfo *TemplateArgs, QualType T, ExprValueKind VK, ExprObjectKind OK, NonOdrUseReason NOUR) : Expr(MemberExprClass, T, VK, OK), Base(Base), MemberDecl(MemberDecl), @@ -1644,11 +1724,30 @@ MemberExpr::MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc, assert(!NameInfo.getName() || MemberDecl->getDeclName() == NameInfo.getName()); MemberExprBits.IsArrow = IsArrow; - MemberExprBits.HasQualifierOrFoundDecl = false; - MemberExprBits.HasTemplateKWAndArgsInfo = false; + MemberExprBits.HasQualifier = QualifierLoc.hasQualifier(); + MemberExprBits.HasFoundDecl = + FoundDecl.getDecl() != MemberDecl || + FoundDecl.getAccess() != MemberDecl->getAccess(); + MemberExprBits.HasTemplateKWAndArgsInfo = + TemplateArgs || TemplateKWLoc.isValid(); MemberExprBits.HadMultipleCandidates = false; MemberExprBits.NonOdrUseReason = NOUR; MemberExprBits.OperatorLoc = OperatorLoc; + + if (hasQualifier()) + new (getTrailingObjects<NestedNameSpecifierLoc>()) + NestedNameSpecifierLoc(QualifierLoc); + if (hasFoundDecl()) + *getTrailingObjects<DeclAccessPair>() = FoundDecl; + if (TemplateArgs) { + auto Deps = TemplateArgumentDependence::None; + getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom( + TemplateKWLoc, *TemplateArgs, getTrailingObjects<TemplateArgumentLoc>(), + Deps); + } else if (TemplateKWLoc.isValid()) { + getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom( + TemplateKWLoc); + } setDependence(computeDependence(this)); } @@ -1658,54 +1757,20 @@ MemberExpr *MemberExpr::Create( ValueDecl *MemberDecl, DeclAccessPair FoundDecl, DeclarationNameInfo NameInfo, const TemplateArgumentListInfo 
*TemplateArgs, QualType T, ExprValueKind VK, ExprObjectKind OK, NonOdrUseReason NOUR) { - bool HasQualOrFound = QualifierLoc || FoundDecl.getDecl() != MemberDecl || - FoundDecl.getAccess() != MemberDecl->getAccess(); + bool HasQualifier = QualifierLoc.hasQualifier(); + bool HasFoundDecl = FoundDecl.getDecl() != MemberDecl || + FoundDecl.getAccess() != MemberDecl->getAccess(); bool HasTemplateKWAndArgsInfo = TemplateArgs || TemplateKWLoc.isValid(); std::size_t Size = - totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo, - TemplateArgumentLoc>( - HasQualOrFound ? 1 : 0, HasTemplateKWAndArgsInfo ? 1 : 0, + totalSizeToAlloc<NestedNameSpecifierLoc, DeclAccessPair, + ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>( + HasQualifier, HasFoundDecl, HasTemplateKWAndArgsInfo, TemplateArgs ? TemplateArgs->size() : 0); void *Mem = C.Allocate(Size, alignof(MemberExpr)); - MemberExpr *E = new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, MemberDecl, - NameInfo, T, VK, OK, NOUR); - - // FIXME: remove remaining dependence computation to computeDependence(). - auto Deps = E->getDependence(); - if (HasQualOrFound) { - // FIXME: Wrong. We should be looking at the member declaration we found. - if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) - Deps |= ExprDependence::TypeValueInstantiation; - else if (QualifierLoc && - QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) - Deps |= ExprDependence::Instantiation; - - E->MemberExprBits.HasQualifierOrFoundDecl = true; - - MemberExprNameQualifier *NQ = - E->getTrailingObjects<MemberExprNameQualifier>(); - NQ->QualifierLoc = QualifierLoc; - NQ->FoundDecl = FoundDecl; - } - - E->MemberExprBits.HasTemplateKWAndArgsInfo = - TemplateArgs || TemplateKWLoc.isValid(); - - if (TemplateArgs) { - auto TemplateArgDeps = TemplateArgumentDependence::None; - E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom( - TemplateKWLoc, *TemplateArgs, - E->getTrailingObjects<TemplateArgumentLoc>(), TemplateArgDeps); - if (TemplateArgDeps & TemplateArgumentDependence::Instantiation) - Deps |= ExprDependence::Instantiation; - } else if (TemplateKWLoc.isValid()) { - E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom( - TemplateKWLoc); - } - E->setDependence(Deps); - - return E; + return new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, QualifierLoc, + TemplateKWLoc, MemberDecl, FoundDecl, NameInfo, + TemplateArgs, T, VK, OK, NOUR); } MemberExpr *MemberExpr::CreateEmpty(const ASTContext &Context, @@ -1714,12 +1779,11 @@ MemberExpr *MemberExpr::CreateEmpty(const ASTContext &Context, unsigned NumTemplateArgs) { assert((!NumTemplateArgs || HasTemplateKWAndArgsInfo) && "template args but no template arg info?"); - bool HasQualOrFound = HasQualifier || HasFoundDecl; std::size_t Size = - totalSizeToAlloc<MemberExprNameQualifier, ASTTemplateKWAndArgsInfo, - TemplateArgumentLoc>(HasQualOrFound ? 1 : 0, - HasTemplateKWAndArgsInfo ? 
1 : 0, - NumTemplateArgs); + totalSizeToAlloc<NestedNameSpecifierLoc, DeclAccessPair, + ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>( + HasQualifier, HasFoundDecl, HasTemplateKWAndArgsInfo, + NumTemplateArgs); void *Mem = Context.Allocate(Size, alignof(MemberExpr)); return new (Mem) MemberExpr(EmptyShell()); } @@ -1857,6 +1921,7 @@ bool CastExpr::CastConsistency() const { case CK_FixedPointToIntegral: case CK_IntegralToFixedPoint: case CK_MatrixCast: + case CK_HLSLVectorTruncation: assert(!getType()->isBooleanType() && "unheralded conversion to bool"); goto CheckNoBasePath; @@ -1876,6 +1941,7 @@ bool CastExpr::CastConsistency() const { case CK_UserDefinedConversion: // operator bool() case CK_BuiltinFnToFnPtr: case CK_FixedPointToBoolean: + case CK_HLSLArrayRValue: CheckNoBasePath: assert(path_empty() && "Cast kind should not have a base path!"); break; @@ -1892,51 +1958,53 @@ const char *CastExpr::getCastKindName(CastKind CK) { } namespace { - const Expr *skipImplicitTemporary(const Expr *E) { - // Skip through reference binding to temporary. - if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E)) - E = Materialize->getSubExpr(); +// Skip over implicit nodes produced as part of semantic analysis. +// Designed for use with IgnoreExprNodes. +static Expr *ignoreImplicitSemaNodes(Expr *E) { + if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E)) + return Materialize->getSubExpr(); - // Skip any temporary bindings; they're implicit. - if (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E)) - E = Binder->getSubExpr(); + if (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E)) + return Binder->getSubExpr(); - return E; - } + if (auto *Full = dyn_cast<FullExpr>(E)) + return Full->getSubExpr(); + + if (auto *CPLIE = dyn_cast<CXXParenListInitExpr>(E); + CPLIE && CPLIE->getInitExprs().size() == 1) + return CPLIE->getInitExprs()[0]; + + return E; } +} // namespace Expr *CastExpr::getSubExprAsWritten() { const Expr *SubExpr = nullptr; - const CastExpr *E = this; - do { - SubExpr = skipImplicitTemporary(E->getSubExpr()); + + for (const CastExpr *E = this; E; E = dyn_cast<ImplicitCastExpr>(SubExpr)) { + SubExpr = IgnoreExprNodes(E->getSubExpr(), ignoreImplicitSemaNodes); // Conversions by constructor and conversion functions have a // subexpression describing the call; strip it off. - if (E->getCastKind() == CK_ConstructorConversion) - SubExpr = - skipImplicitTemporary(cast<CXXConstructExpr>(SubExpr->IgnoreImplicit())->getArg(0)); - else if (E->getCastKind() == CK_UserDefinedConversion) { - SubExpr = SubExpr->IgnoreImplicit(); - assert((isa<CXXMemberCallExpr>(SubExpr) || - isa<BlockExpr>(SubExpr)) && + if (E->getCastKind() == CK_ConstructorConversion) { + SubExpr = IgnoreExprNodes(cast<CXXConstructExpr>(SubExpr)->getArg(0), + ignoreImplicitSemaNodes); + } else if (E->getCastKind() == CK_UserDefinedConversion) { + assert((isa<CXXMemberCallExpr>(SubExpr) || isa<BlockExpr>(SubExpr)) && "Unexpected SubExpr for CK_UserDefinedConversion."); if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SubExpr)) SubExpr = MCE->getImplicitObjectArgument(); } + } - // If the subexpression we're left with is an implicit cast, look - // through that, too. 
- } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr))); - - return const_cast<Expr*>(SubExpr); + return const_cast<Expr *>(SubExpr); } NamedDecl *CastExpr::getConversionFunction() const { const Expr *SubExpr = nullptr; for (const CastExpr *E = this; E; E = dyn_cast<ImplicitCastExpr>(SubExpr)) { - SubExpr = skipImplicitTemporary(E->getSubExpr()); + SubExpr = IgnoreExprNodes(E->getSubExpr(), ignoreImplicitSemaNodes); if (E->getCastKind() == CK_ConstructorConversion) return cast<CXXConstructExpr>(SubExpr)->getConstructor(); @@ -1976,7 +2044,7 @@ const FieldDecl *CastExpr::getTargetFieldForToUnionCast(const RecordDecl *RD, for (Field = RD->field_begin(), FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) { if (Ctx.hasSameUnqualifiedType(Field->getType(), OpType) && - !Field->isUnnamedBitfield()) { + !Field->isUnnamedBitField()) { return *Field; } } @@ -2137,12 +2205,13 @@ OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) { bool BinaryOperator::isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, - Expr *LHS, Expr *RHS) { + const Expr *LHS, + const Expr *RHS) { if (Opc != BO_Add) return false; // Check that we have one pointer and one integer operand. - Expr *PExp; + const Expr *PExp; if (LHS->getType()->isPointerType()) { if (!RHS->getType()->isIntegerType()) return false; @@ -2168,41 +2237,35 @@ bool BinaryOperator::isNullPointerArithmeticExtension(ASTContext &Ctx, return true; } -static QualType getDecayedSourceLocExprType(const ASTContext &Ctx, - SourceLocExpr::IdentKind Kind) { - switch (Kind) { - case SourceLocExpr::File: - case SourceLocExpr::Function: { - QualType ArrTy = Ctx.getStringLiteralArrayType(Ctx.CharTy, 0); - return Ctx.getPointerType(ArrTy->getAsArrayTypeUnsafe()->getElementType()); - } - case SourceLocExpr::Line: - case SourceLocExpr::Column: - return Ctx.UnsignedIntTy; - } - llvm_unreachable("unhandled case"); -} - -SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, IdentKind Kind, - SourceLocation BLoc, SourceLocation RParenLoc, +SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, SourceLocIdentKind Kind, + QualType ResultTy, SourceLocation BLoc, + SourceLocation RParenLoc, DeclContext *ParentContext) - : Expr(SourceLocExprClass, getDecayedSourceLocExprType(Ctx, Kind), - VK_PRValue, OK_Ordinary), + : Expr(SourceLocExprClass, ResultTy, VK_PRValue, OK_Ordinary), BuiltinLoc(BLoc), RParenLoc(RParenLoc), ParentContext(ParentContext) { - SourceLocExprBits.Kind = Kind; - setDependence(ExprDependence::None); + SourceLocExprBits.Kind = llvm::to_underlying(Kind); + // In dependent contexts, function names may change. + setDependence(MayBeDependent(Kind) && ParentContext->isDependentContext() + ? 
ExprDependence::Value + : ExprDependence::None); } StringRef SourceLocExpr::getBuiltinStr() const { switch (getIdentKind()) { - case File: + case SourceLocIdentKind::File: return "__builtin_FILE"; - case Function: + case SourceLocIdentKind::FileName: + return "__builtin_FILE_NAME"; + case SourceLocIdentKind::Function: return "__builtin_FUNCTION"; - case Line: + case SourceLocIdentKind::FuncSig: + return "__builtin_FUNCSIG"; + case SourceLocIdentKind::Line: return "__builtin_LINE"; - case Column: + case SourceLocIdentKind::Column: return "__builtin_COLUMN"; + case SourceLocIdentKind::SourceLocStruct: + return "__builtin_source_location"; } llvm_unreachable("unexpected IdentKind!"); } @@ -2212,14 +2275,17 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx, SourceLocation Loc; const DeclContext *Context; - std::tie(Loc, - Context) = [&]() -> std::pair<SourceLocation, const DeclContext *> { - if (auto *DIE = dyn_cast_or_null<CXXDefaultInitExpr>(DefaultExpr)) - return {DIE->getUsedLocation(), DIE->getUsedContext()}; - if (auto *DAE = dyn_cast_or_null<CXXDefaultArgExpr>(DefaultExpr)) - return {DAE->getUsedLocation(), DAE->getUsedContext()}; - return {this->getLocation(), this->getParentContext()}; - }(); + if (const auto *DIE = dyn_cast_if_present<CXXDefaultInitExpr>(DefaultExpr)) { + Loc = DIE->getUsedLocation(); + Context = DIE->getUsedContext(); + } else if (const auto *DAE = + dyn_cast_if_present<CXXDefaultArgExpr>(DefaultExpr)) { + Loc = DAE->getUsedLocation(); + Context = DAE->getUsedContext(); + } else { + Loc = getLocation(); + Context = getParentContext(); + } PresumedLoc PLoc = Ctx.getSourceManager().getPresumedLoc( Ctx.getSourceManager().getExpansionRange(Loc).getEnd()); @@ -2233,29 +2299,91 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx, }; switch (getIdentKind()) { - case SourceLocExpr::File: { + case SourceLocIdentKind::FileName: { + // __builtin_FILE_NAME() is a Clang-specific extension that expands to the + // the last part of __builtin_FILE(). + SmallString<256> FileName; + clang::Preprocessor::processPathToFileName( + FileName, PLoc, Ctx.getLangOpts(), Ctx.getTargetInfo()); + return MakeStringLiteral(FileName); + } + case SourceLocIdentKind::File: { SmallString<256> Path(PLoc.getFilename()); - Ctx.getLangOpts().remapPathPrefix(Path); + clang::Preprocessor::processPathForFileMacro(Path, Ctx.getLangOpts(), + Ctx.getTargetInfo()); return MakeStringLiteral(Path); } - case SourceLocExpr::Function: { - const Decl *CurDecl = dyn_cast_or_null<Decl>(Context); + case SourceLocIdentKind::Function: + case SourceLocIdentKind::FuncSig: { + const auto *CurDecl = dyn_cast<Decl>(Context); + const auto Kind = getIdentKind() == SourceLocIdentKind::Function + ? PredefinedIdentKind::Function + : PredefinedIdentKind::FuncSig; return MakeStringLiteral( - CurDecl ? PredefinedExpr::ComputeName(PredefinedExpr::Function, CurDecl) - : std::string("")); + CurDecl ? PredefinedExpr::ComputeName(Kind, CurDecl) : std::string("")); } - case SourceLocExpr::Line: - case SourceLocExpr::Column: { - llvm::APSInt IntVal(Ctx.getIntWidth(Ctx.UnsignedIntTy), - /*isUnsigned=*/true); - IntVal = getIdentKind() == SourceLocExpr::Line ? 
PLoc.getLine() - : PLoc.getColumn(); - return APValue(IntVal); + case SourceLocIdentKind::Line: + return APValue(Ctx.MakeIntValue(PLoc.getLine(), Ctx.UnsignedIntTy)); + case SourceLocIdentKind::Column: + return APValue(Ctx.MakeIntValue(PLoc.getColumn(), Ctx.UnsignedIntTy)); + case SourceLocIdentKind::SourceLocStruct: { + // Fill in a std::source_location::__impl structure, by creating an + // artificial file-scoped CompoundLiteralExpr, and returning a pointer to + // that. + const CXXRecordDecl *ImplDecl = getType()->getPointeeCXXRecordDecl(); + assert(ImplDecl); + + // Construct an APValue for the __impl struct, and get or create a Decl + // corresponding to that. Note that we've already verified that the shape of + // the ImplDecl type is as expected. + + APValue Value(APValue::UninitStruct(), 0, 4); + for (const FieldDecl *F : ImplDecl->fields()) { + StringRef Name = F->getName(); + if (Name == "_M_file_name") { + SmallString<256> Path(PLoc.getFilename()); + clang::Preprocessor::processPathForFileMacro(Path, Ctx.getLangOpts(), + Ctx.getTargetInfo()); + Value.getStructField(F->getFieldIndex()) = MakeStringLiteral(Path); + } else if (Name == "_M_function_name") { + // Note: this emits the PrettyFunction name -- different than what + // __builtin_FUNCTION() above returns! + const auto *CurDecl = dyn_cast<Decl>(Context); + Value.getStructField(F->getFieldIndex()) = MakeStringLiteral( + CurDecl && !isa<TranslationUnitDecl>(CurDecl) + ? StringRef(PredefinedExpr::ComputeName( + PredefinedIdentKind::PrettyFunction, CurDecl)) + : ""); + } else if (Name == "_M_line") { + llvm::APSInt IntVal = Ctx.MakeIntValue(PLoc.getLine(), F->getType()); + Value.getStructField(F->getFieldIndex()) = APValue(IntVal); + } else if (Name == "_M_column") { + llvm::APSInt IntVal = Ctx.MakeIntValue(PLoc.getColumn(), F->getType()); + Value.getStructField(F->getFieldIndex()) = APValue(IntVal); + } + } + + UnnamedGlobalConstantDecl *GV = + Ctx.getUnnamedGlobalConstantDecl(getType()->getPointeeType(), Value); + + return APValue(GV, CharUnits::Zero(), ArrayRef<APValue::LValuePathEntry>{}, + false); } } llvm_unreachable("unhandled case"); } +EmbedExpr::EmbedExpr(const ASTContext &Ctx, SourceLocation Loc, + EmbedDataStorage *Data, unsigned Begin, + unsigned NumOfElements) + : Expr(EmbedExprClass, Ctx.IntTy, VK_PRValue, OK_Ordinary), + EmbedKeywordLoc(Loc), Ctx(&Ctx), Data(Data), Begin(Begin), + NumOfElements(NumOfElements) { + setDependence(ExprDependence::None); + FakeChildNode = IntegerLiteral::Create( + Ctx, llvm::APInt::getZero(Ctx.getTypeSize(getType())), getType(), Loc); +} + InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc, ArrayRef<Expr *> initExprs, SourceLocation rbraceloc) : Expr(InitListExprClass, QualType(), VK_PRValue, OK_Ordinary), @@ -2308,7 +2436,7 @@ bool InitListExpr::isStringLiteralInit() const { const Expr *Init = getInit(0); if (!Init) return false; - Init = Init->IgnoreParens(); + Init = Init->IgnoreParenImpCasts(); return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init); } @@ -2370,10 +2498,8 @@ SourceLocation InitListExpr::getEndLoc() const { SourceLocation End = RBraceLoc; if (End.isInvalid()) { // Find the first non-null initializer from the end. 
- for (InitExprsTy::const_reverse_iterator I = InitExprs.rbegin(), - E = InitExprs.rend(); - I != E; ++I) { - if (Stmt *S = *I) { + for (Stmt *S : llvm::reverse(InitExprs)) { + if (S) { End = S->getEndLoc(); break; } @@ -2457,8 +2583,12 @@ bool Expr::isReadIfDiscardedInCPlusPlus11() const { } // Objective-C++ extensions to the rule. - if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E)) + if (isa<ObjCIvarRefExpr>(E)) return true; + if (const auto *POE = dyn_cast<PseudoObjectExpr>(E)) { + if (isa<ObjCPropertyRefExpr, ObjCSubscriptRefExpr>(POE->getSyntacticForm())) + return true; + } return false; } @@ -2620,7 +2750,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc, } // Fallthrough for generic call handling. - LLVM_FALLTHROUGH; + [[fallthrough]]; } case CallExprClass: case CXXMemberCallExprClass: @@ -2708,23 +2838,35 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc, } case ObjCPropertyRefExprClass: + case ObjCSubscriptRefExprClass: WarnE = this; Loc = getExprLoc(); R1 = getSourceRange(); return true; case PseudoObjectExprClass: { - const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this); + const auto *POE = cast<PseudoObjectExpr>(this); - // Only complain about things that have the form of a getter. - if (isa<UnaryOperator>(PO->getSyntacticForm()) || - isa<BinaryOperator>(PO->getSyntacticForm())) - return false; + // For some syntactic forms, we should always warn. + if (isa<ObjCPropertyRefExpr, ObjCSubscriptRefExpr>( + POE->getSyntacticForm())) { + WarnE = this; + Loc = getExprLoc(); + R1 = getSourceRange(); + return true; + } - WarnE = this; - Loc = getExprLoc(); - R1 = getSourceRange(); - return true; + // For others, we should never warn. + if (auto *BO = dyn_cast<BinaryOperator>(POE->getSyntacticForm())) + if (BO->isAssignmentOp()) + return false; + if (auto *UO = dyn_cast<UnaryOperator>(POE->getSyntacticForm())) + if (UO->isIncrementDecrementOp()) + return false; + + // Otherwise, warn if the result expression would warn. + const Expr *Result = POE->getResultExpr(); + return Result && Result->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx); } case StmtExprClass: { @@ -2936,7 +3078,7 @@ Expr *Expr::IgnoreParenCasts() { Expr *Expr::IgnoreConversionOperatorSingleStep() { if (auto *MCE = dyn_cast<CXXMemberCallExpr>(this)) { - if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl())) + if (isa_and_nonnull<CXXConversionDecl>(MCE->getMethodDecl())) return MCE->getImplicitObjectArgument(); } return this; @@ -3168,6 +3310,10 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef, // kill the second parameter. if (IsForRef) { + if (auto *EWC = dyn_cast<ExprWithCleanups>(this)) + return EWC->getSubExpr()->isConstantInitializer(Ctx, true, Culprit); + if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(this)) + return MTE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit); EvalResult Result; if (EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects) return true; @@ -3219,6 +3365,12 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef, DIUE->getUpdater()->isConstantInitializer(Ctx, false, Culprit); } case InitListExprClass: { + // C++ [dcl.init.aggr]p2: + // The elements of an aggregate are: + // - for an array, the array elements in increasing subscript order, or + // - for a class, the direct base classes in declaration order, followed + // by the direct non-static data members (11.4) that are not members of + // an anonymous union, in declaration order. 
const InitListExpr *ILE = cast<InitListExpr>(this); assert(ILE->isSemanticForm() && "InitListExpr must be in semantic form"); if (ILE->getType()->isArrayType()) { @@ -3233,13 +3385,26 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef, if (ILE->getType()->isRecordType()) { unsigned ElementNo = 0; RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl(); + + // In C++17, bases were added to the list of members used by aggregate + // initialization. + if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { + for (unsigned i = 0, e = CXXRD->getNumBases(); i < e; i++) { + if (ElementNo < ILE->getNumInits()) { + const Expr *Elt = ILE->getInit(ElementNo++); + if (!Elt->isConstantInitializer(Ctx, false, Culprit)) + return false; + } + } + } + for (const auto *Field : RD->fields()) { // If this is a union, skip all the fields that aren't being initialized. if (RD->isUnion() && ILE->getInitializedFieldInUnion() != Field) continue; // Don't emit anonymous bitfields, they just affect layout. - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; if (ElementNo < ILE->getNumInits()) { @@ -3287,6 +3452,11 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef, return Exp->getSubExpr()->isConstantInitializer(Ctx, false, Culprit); break; } + case PackIndexingExprClass: { + return cast<PackIndexingExpr>(this) + ->getSelectedExpr() + ->isConstantInitializer(Ctx, false, Culprit); + } case CXXFunctionalCastExprClass: case CXXStaticCastExprClass: case ImplicitCastExprClass: @@ -3305,6 +3475,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef, CE->getCastKind() == CK_ConstructorConversion || CE->getCastKind() == CK_NonAtomicToAtomic || CE->getCastKind() == CK_AtomicToNonAtomic || + CE->getCastKind() == CK_NullToPointer || CE->getCastKind() == CK_IntToOCLSampler) return CE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit); @@ -3336,9 +3507,9 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef, } bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const { - const FunctionDecl* FD = getDirectCallee(); - if (!FD || (FD->getBuiltinID() != Builtin::BI__assume && - FD->getBuiltinID() != Builtin::BI__builtin_assume)) + unsigned BuiltinID = getBuiltinCallee(); + if (BuiltinID != Builtin::BI__assume && + BuiltinID != Builtin::BI__builtin_assume) return false; const Expr* Arg = getArg(0); @@ -3347,6 +3518,10 @@ bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const { Arg->EvaluateAsBooleanCondition(ArgVal, Ctx) && !ArgVal; } +bool CallExpr::isCallToStdMove() const { + return getBuiltinCallee() == Builtin::BImove; +} + namespace { /// Look for any side effects within a Stmt. class SideEffectFinder : public ConstEvaluatedExprVisitor<SideEffectFinder> { @@ -3451,9 +3626,11 @@ bool Expr::HasSideEffects(const ASTContext &Ctx, case CXXUuidofExprClass: case OpaqueValueExprClass: case SourceLocExprClass: + case EmbedExprClass: case ConceptSpecializationExprClass: case RequiresExprClass: case SYCLUniqueStableNameExprClass: + case PackIndexingExprClass: // These never have a side-effect. 
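EmbedExpr, listed among the side-effect-free nodes above, models the C23 #embed directive; a hedged sketch (data.bin is a placeholder file name, and clang also accepts the directive in C++ as an extension):

    // The bytes of data.bin expand to a comma-separated list of integers here.
    static const unsigned char kData[] = {
    #embed "data.bin"
    };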
return false; @@ -3513,7 +3690,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx, case ParenExprClass: case ArraySubscriptExprClass: case MatrixSubscriptExprClass: - case OMPArraySectionExprClass: + case ArraySectionExprClass: case OMPArrayShapingExprClass: case OMPIteratorExprClass: case MemberExprClass: @@ -3533,6 +3710,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx, case ShuffleVectorExprClass: case ConvertVectorExprClass: case AsTypeExprClass: + case CXXParenListInitExprClass: // These have a side-effect if any subexpression does. break; @@ -3580,7 +3758,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx, DCE->getCastKind() == CK_Dynamic) return true; } - LLVM_FALLTHROUGH; + [[fallthrough]]; case ImplicitCastExprClass: case CStyleCastExprClass: case CXXStaticCastExprClass: @@ -3603,10 +3781,18 @@ bool Expr::HasSideEffects(const ASTContext &Ctx, break; } - case CXXTypeidExprClass: - // typeid might throw if its subexpression is potentially-evaluated, so has - // side-effects in that case whether or not its subexpression does. - return cast<CXXTypeidExpr>(this)->isPotentiallyEvaluated(); + case CXXTypeidExprClass: { + const auto *TE = cast<CXXTypeidExpr>(this); + if (!TE->isPotentiallyEvaluated()) + return false; + + // If this type id expression can throw because of a null pointer, that is a + // side-effect independent of if the operand has a side-effect + if (IncludePossibleEffects && TE->hasNullCheck()) + return true; + + break; + } case CXXConstructExprClass: case CXXTemporaryObjectExprClass: { @@ -3725,9 +3911,14 @@ namespace { } void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) { - if (E->getTemporary()->getDestructor()->isTrivial()) { - Inherited::VisitStmt(E); - return; + // Destructor of the temporary might be null if destructor declaration + // is not valid. + if (const CXXDestructorDecl *DtorDecl = + E->getTemporary()->getDestructor()) { + if (DtorDecl->isTrivial()) { + Inherited::VisitStmt(E); + return; + } } NonTrivial = true; @@ -3779,11 +3970,8 @@ Expr::isNullPointerConstant(ASTContext &Ctx, // has non-default address space it is not treated as nullptr. // (__generic void*)0 in OpenCL 2.0 should not be treated as nullptr // since it cannot be assigned to a pointer to constant address space. - if ((Ctx.getLangOpts().OpenCLVersion >= 200 && - Pointee.getAddressSpace() == LangAS::opencl_generic) || - (Ctx.getLangOpts().OpenCL && - Ctx.getLangOpts().OpenCLVersion < 200 && - Pointee.getAddressSpace() == LangAS::opencl_private)) + if (Ctx.getLangOpts().OpenCL && + Pointee.getAddressSpace() == Ctx.getDefaultOpenCLPointeeAddrSpace()) Qs.removeAddressSpace(); if (Pointee->isVoidType() && Qs.empty() && // to void* @@ -3831,7 +4019,7 @@ Expr::isNullPointerConstant(ASTContext &Ctx, if (getType().isNull()) return NPCK_NotNull; - // C++11 nullptr_t is always a null pointer constant. + // C++11/C23 nullptr_t is always a null pointer constant. if (getType()->isNullPtrType()) return NPCK_CXX11_nullptr; @@ -3958,6 +4146,13 @@ FieldDecl *Expr::getSourceBitField() { return nullptr; } +EnumConstantDecl *Expr::getEnumConstantDecl() { + Expr *E = this->IgnoreParenImpCasts(); + if (auto *DRE = dyn_cast<DeclRefExpr>(E)) + return dyn_cast<EnumConstantDecl>(DRE->getDecl()); + return nullptr; +} + bool Expr::refersToVectorElement() const { // FIXME: Why do we not just look at the ObjectKind here? 
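The CXXTypeidExpr change above (a potentially evaluated typeid only counts as a side effect when it carries a null check) corresponds to source like this minimal sketch:

    #include <typeinfo>

    struct Poly { virtual ~Poly() = default; };

    const std::type_info &classify(Poly *p) {
      // The operand dereferences a pointer to a polymorphic type, so the
      // expression is potentially evaluated and throws std::bad_typeid when
      // p is null.
      return typeid(*p);
    }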
const Expr *E = this->IgnoreParens(); @@ -4128,7 +4323,7 @@ bool ExtVectorElementExpr::containsDuplicateElements() const { Comp = Comp.substr(1); for (unsigned i = 0, e = Comp.size(); i != e; ++i) - if (Comp.substr(i + 1).find(Comp[i]) != StringRef::npos) + if (Comp.substr(i + 1).contains(Comp[i])) return true; return false; @@ -4196,18 +4391,48 @@ GenericSelectionExpr::GenericSelectionExpr( AssocExprs[ResultIndex]->getValueKind(), AssocExprs[ResultIndex]->getObjectKind()), NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex), - DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) { + IsExprPredicate(true), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) { + assert(AssocTypes.size() == AssocExprs.size() && + "Must have the same number of association expressions" + " and TypeSourceInfo!"); + assert(ResultIndex < NumAssocs && "ResultIndex is out-of-bounds!"); + + GenericSelectionExprBits.GenericLoc = GenericLoc; + getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()] = + ControllingExpr; + std::copy(AssocExprs.begin(), AssocExprs.end(), + getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs()); + std::copy(AssocTypes.begin(), AssocTypes.end(), + getTrailingObjects<TypeSourceInfo *>() + + getIndexOfStartOfAssociatedTypes()); + + setDependence(computeDependence(this, ContainsUnexpandedParameterPack)); +} + +GenericSelectionExpr::GenericSelectionExpr( + const ASTContext &, SourceLocation GenericLoc, + TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes, + ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc, + SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack, + unsigned ResultIndex) + : Expr(GenericSelectionExprClass, AssocExprs[ResultIndex]->getType(), + AssocExprs[ResultIndex]->getValueKind(), + AssocExprs[ResultIndex]->getObjectKind()), + NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex), + IsExprPredicate(false), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) { assert(AssocTypes.size() == AssocExprs.size() && "Must have the same number of association expressions" " and TypeSourceInfo!"); assert(ResultIndex < NumAssocs && "ResultIndex is out-of-bounds!"); GenericSelectionExprBits.GenericLoc = GenericLoc; - getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr; + getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()] = + ControllingType; std::copy(AssocExprs.begin(), AssocExprs.end(), - getTrailingObjects<Stmt *>() + AssocExprStartIndex); + getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs()); std::copy(AssocTypes.begin(), AssocTypes.end(), - getTrailingObjects<TypeSourceInfo *>()); + getTrailingObjects<TypeSourceInfo *>() + + getIndexOfStartOfAssociatedTypes()); setDependence(computeDependence(this, ContainsUnexpandedParameterPack)); } @@ -4220,17 +4445,44 @@ GenericSelectionExpr::GenericSelectionExpr( : Expr(GenericSelectionExprClass, Context.DependentTy, VK_PRValue, OK_Ordinary), NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex), - DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) { + IsExprPredicate(true), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) { assert(AssocTypes.size() == AssocExprs.size() && "Must have the same number of association expressions" " and TypeSourceInfo!"); GenericSelectionExprBits.GenericLoc = GenericLoc; - getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr; + getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()] = + ControllingExpr; std::copy(AssocExprs.begin(), AssocExprs.end(), - getTrailingObjects<Stmt *>() + AssocExprStartIndex); + 
getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs()); std::copy(AssocTypes.begin(), AssocTypes.end(), - getTrailingObjects<TypeSourceInfo *>()); + getTrailingObjects<TypeSourceInfo *>() + + getIndexOfStartOfAssociatedTypes()); + + setDependence(computeDependence(this, ContainsUnexpandedParameterPack)); +} + +GenericSelectionExpr::GenericSelectionExpr( + const ASTContext &Context, SourceLocation GenericLoc, + TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes, + ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc, + SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack) + : Expr(GenericSelectionExprClass, Context.DependentTy, VK_PRValue, + OK_Ordinary), + NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex), + IsExprPredicate(false), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) { + assert(AssocTypes.size() == AssocExprs.size() && + "Must have the same number of association expressions" + " and TypeSourceInfo!"); + + GenericSelectionExprBits.GenericLoc = GenericLoc; + getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()] = + ControllingType; + std::copy(AssocExprs.begin(), AssocExprs.end(), + getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs()); + std::copy(AssocTypes.begin(), AssocTypes.end(), + getTrailingObjects<TypeSourceInfo *>() + + getIndexOfStartOfAssociatedTypes()); setDependence(computeDependence(this, ContainsUnexpandedParameterPack)); } @@ -4266,6 +4518,35 @@ GenericSelectionExpr *GenericSelectionExpr::Create( RParenLoc, ContainsUnexpandedParameterPack); } +GenericSelectionExpr *GenericSelectionExpr::Create( + const ASTContext &Context, SourceLocation GenericLoc, + TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes, + ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc, + SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack, + unsigned ResultIndex) { + unsigned NumAssocs = AssocExprs.size(); + void *Mem = Context.Allocate( + totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs), + alignof(GenericSelectionExpr)); + return new (Mem) GenericSelectionExpr( + Context, GenericLoc, ControllingType, AssocTypes, AssocExprs, DefaultLoc, + RParenLoc, ContainsUnexpandedParameterPack, ResultIndex); +} + +GenericSelectionExpr *GenericSelectionExpr::Create( + const ASTContext &Context, SourceLocation GenericLoc, + TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes, + ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc, + SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack) { + unsigned NumAssocs = AssocExprs.size(); + void *Mem = Context.Allocate( + totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs), + alignof(GenericSelectionExpr)); + return new (Mem) GenericSelectionExpr( + Context, GenericLoc, ControllingType, AssocTypes, AssocExprs, DefaultLoc, + RParenLoc, ContainsUnexpandedParameterPack); +} + GenericSelectionExpr * GenericSelectionExpr::CreateEmpty(const ASTContext &Context, unsigned NumAssocs) { @@ -4279,11 +4560,11 @@ GenericSelectionExpr::CreateEmpty(const ASTContext &Context, // DesignatedInitExpr //===----------------------------------------------------------------------===// -IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const { - assert(Kind == FieldDesignator && "Only valid on a field designator"); - if (Field.NameOrField & 0x01) - return reinterpret_cast<IdentifierInfo *>(Field.NameOrField & ~0x01); - return getField()->getIdentifier(); +const IdentifierInfo 
*DesignatedInitExpr::Designator::getFieldName() const { + assert(isFieldDesignator() && "Only valid on a field designator"); + if (FieldInfo.NameOrField & 0x01) + return reinterpret_cast<IdentifierInfo *>(FieldInfo.NameOrField & ~0x01); + return getFieldDecl()->getIdentifier(); } DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty, @@ -4358,14 +4639,20 @@ SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const { } SourceLocation DesignatedInitExpr::getBeginLoc() const { - SourceLocation StartLoc; auto *DIE = const_cast<DesignatedInitExpr *>(this); Designator &First = *DIE->getDesignator(0); - if (First.isFieldDesignator()) - StartLoc = GNUSyntax ? First.Field.FieldLoc : First.Field.DotLoc; - else - StartLoc = First.ArrayOrRange.LBracketLoc; - return StartLoc; + if (First.isFieldDesignator()) { + // Skip past implicit designators for anonymous structs/unions, since + // these do not have valid source locations. + for (unsigned int i = 0; i < DIE->size(); i++) { + Designator &Des = *DIE->getDesignator(i); + SourceLocation retval = GNUSyntax ? Des.getFieldLoc() : Des.getDotLoc(); + if (!retval.isValid()) + continue; + return retval; + } + } + return First.getLBracketLoc(); } SourceLocation DesignatedInitExpr::getEndLoc() const { @@ -4373,20 +4660,18 @@ SourceLocation DesignatedInitExpr::getEndLoc() const { } Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const { - assert(D.Kind == Designator::ArrayDesignator && "Requires array designator"); - return getSubExpr(D.ArrayOrRange.Index + 1); + assert(D.isArrayDesignator() && "Requires array designator"); + return getSubExpr(D.getArrayIndex() + 1); } Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const { - assert(D.Kind == Designator::ArrayRangeDesignator && - "Requires array range designator"); - return getSubExpr(D.ArrayOrRange.Index + 1); + assert(D.isArrayRangeDesignator() && "Requires array range designator"); + return getSubExpr(D.getArrayIndex() + 1); } Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const { - assert(D.Kind == Designator::ArrayRangeDesignator && - "Requires array range designator"); - return getSubExpr(D.ArrayOrRange.Index + 2); + assert(D.isArrayRangeDesignator() && "Requires array range designator"); + return getSubExpr(D.getArrayIndex() + 2); } /// Replaces the designator at index @p Idx with the series @@ -4425,7 +4710,8 @@ DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C, OK_Ordinary) { BaseAndUpdaterExprs[0] = baseExpr; - InitListExpr *ILE = new (C) InitListExpr(C, lBraceLoc, None, rBraceLoc); + InitListExpr *ILE = + new (C) InitListExpr(C, lBraceLoc, std::nullopt, rBraceLoc); ILE->setType(baseExpr->getType()); BaseAndUpdaterExprs[1] = ILE; @@ -4695,7 +4981,9 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) { case AO__atomic_load_n: return 2; + case AO__scoped_atomic_load_n: case AO__opencl_atomic_load: + case AO__hip_atomic_load: case AO__c11_atomic_store: case AO__c11_atomic_exchange: case AO__atomic_load: @@ -4707,6 +4995,7 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) { case AO__c11_atomic_fetch_and: case AO__c11_atomic_fetch_or: case AO__c11_atomic_fetch_xor: + case AO__c11_atomic_fetch_nand: case AO__c11_atomic_fetch_max: case AO__c11_atomic_fetch_min: case AO__atomic_fetch_add: @@ -4727,7 +5016,36 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) { case AO__atomic_fetch_max: return 3; + case AO__scoped_atomic_load: + case AO__scoped_atomic_store: + case AO__scoped_atomic_store_n: + case 
AO__scoped_atomic_fetch_add: + case AO__scoped_atomic_fetch_sub: + case AO__scoped_atomic_fetch_and: + case AO__scoped_atomic_fetch_or: + case AO__scoped_atomic_fetch_xor: + case AO__scoped_atomic_fetch_nand: + case AO__scoped_atomic_add_fetch: + case AO__scoped_atomic_sub_fetch: + case AO__scoped_atomic_and_fetch: + case AO__scoped_atomic_or_fetch: + case AO__scoped_atomic_xor_fetch: + case AO__scoped_atomic_nand_fetch: + case AO__scoped_atomic_min_fetch: + case AO__scoped_atomic_max_fetch: + case AO__scoped_atomic_fetch_min: + case AO__scoped_atomic_fetch_max: + case AO__scoped_atomic_exchange_n: + case AO__hip_atomic_exchange: + case AO__hip_atomic_fetch_add: + case AO__hip_atomic_fetch_sub: + case AO__hip_atomic_fetch_and: + case AO__hip_atomic_fetch_or: + case AO__hip_atomic_fetch_xor: + case AO__hip_atomic_fetch_min: + case AO__hip_atomic_fetch_max: case AO__opencl_atomic_store: + case AO__hip_atomic_store: case AO__opencl_atomic_exchange: case AO__opencl_atomic_fetch_add: case AO__opencl_atomic_fetch_sub: @@ -4739,15 +5057,21 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) { case AO__atomic_exchange: return 4; + case AO__scoped_atomic_exchange: case AO__c11_atomic_compare_exchange_strong: case AO__c11_atomic_compare_exchange_weak: return 5; - + case AO__hip_atomic_compare_exchange_strong: case AO__opencl_atomic_compare_exchange_strong: case AO__opencl_atomic_compare_exchange_weak: + case AO__hip_atomic_compare_exchange_weak: case AO__atomic_compare_exchange: case AO__atomic_compare_exchange_n: return 6; + + case AO__scoped_atomic_compare_exchange: + case AO__scoped_atomic_compare_exchange_n: + return 7; } llvm_unreachable("unknown atomic op"); } @@ -4759,9 +5083,9 @@ QualType AtomicExpr::getValueType() const { return T; } -QualType OMPArraySectionExpr::getBaseOriginalType(const Expr *Base) { +QualType ArraySectionExpr::getBaseOriginalType(const Expr *Base) { unsigned ArraySectionCount = 0; - while (auto *OASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParens())) { + while (auto *OASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParens())) { Base = OASE->getBase(); ++ArraySectionCount; } @@ -4779,10 +5103,10 @@ QualType OMPArraySectionExpr::getBaseOriginalType(const Expr *Base) { for (unsigned Cnt = 0; Cnt < ArraySectionCount; ++Cnt) { if (OriginalTy->isAnyPointerType()) OriginalTy = OriginalTy->getPointeeType(); - else { - assert (OriginalTy->isArrayType()); + else if (OriginalTy->isArrayType()) OriginalTy = OriginalTy->castAsArrayTypeUnsafe()->getElementType(); - } + else + return {}; } return OriginalTy; } @@ -4794,7 +5118,7 @@ RecoveryExpr::RecoveryExpr(ASTContext &Ctx, QualType T, SourceLocation BeginLoc, OK_Ordinary), BeginLoc(BeginLoc), EndLoc(EndLoc), NumExprs(SubExprs.size()) { assert(!T.isNull()); - assert(llvm::all_of(SubExprs, [](Expr* E) { return E != nullptr; })); + assert(!llvm::is_contained(SubExprs, nullptr)); llvm::copy(SubExprs, getTrailingObjects<Expr *>()); setDependence(computeDependence(this)); diff --git a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp index c98cfd74dab0..45e2badf2ddd 100644 --- a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp +++ b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp @@ -38,6 +38,7 @@ #include <cstddef> #include <cstring> #include <memory> +#include <optional> using namespace clang; @@ -110,7 +111,7 @@ CXXRewrittenBinaryOperator::getDecomposedForm() const { return Result; // Otherwise, we expect a <=> to now be on the LHS. 
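The form being decomposed here comes from C++20 rewritten comparisons; a minimal sketch:

    #include <compare>

    struct Point {
      int x;
      friend auto operator<=>(const Point &, const Point &) = default;
    };

    // With no operator< declared, 'a < b' is rewritten to '(a <=> b) < 0';
    // getDecomposedForm() walks that form back to the <=> on the left-hand side.
    bool less(Point a, Point b) { return a < b; }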
- E = Result.LHS->IgnoreImplicitAsWritten(); + E = Result.LHS->IgnoreUnlessSpelledInSource(); if (auto *BO = dyn_cast<BinaryOperator>(E)) { assert(BO->getOpcode() == BO_Cmp); Result.LHS = BO->getLHS(); @@ -165,6 +166,53 @@ QualType CXXTypeidExpr::getTypeOperand(ASTContext &Context) const { Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType(), Quals); } +static bool isGLValueFromPointerDeref(const Expr *E) { + E = E->IgnoreParens(); + + if (const auto *CE = dyn_cast<CastExpr>(E)) { + if (!CE->getSubExpr()->isGLValue()) + return false; + return isGLValueFromPointerDeref(CE->getSubExpr()); + } + + if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) + return isGLValueFromPointerDeref(OVE->getSourceExpr()); + + if (const auto *BO = dyn_cast<BinaryOperator>(E)) + if (BO->getOpcode() == BO_Comma) + return isGLValueFromPointerDeref(BO->getRHS()); + + if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E)) + return isGLValueFromPointerDeref(ACO->getTrueExpr()) || + isGLValueFromPointerDeref(ACO->getFalseExpr()); + + // C++11 [expr.sub]p1: + // The expression E1[E2] is identical (by definition) to *((E1)+(E2)) + if (isa<ArraySubscriptExpr>(E)) + return true; + + if (const auto *UO = dyn_cast<UnaryOperator>(E)) + if (UO->getOpcode() == UO_Deref) + return true; + + return false; +} + +bool CXXTypeidExpr::hasNullCheck() const { + if (!isPotentiallyEvaluated()) + return false; + + // C++ [expr.typeid]p2: + // If the glvalue expression is obtained by applying the unary * operator to + // a pointer and the pointer is a null pointer value, the typeid expression + // throws the std::bad_typeid exception. + // + // However, this paragraph's intent is not clear. We choose a very generous + // interpretation which implores us to consider comma operators, conditional + // operators, parentheses and other such constructs. + return isGLValueFromPointerDeref(getExprOperand()); +} + QualType CXXUuidofExpr::getTypeOperand(ASTContext &Context) const { assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)"); Qualifiers Quals; @@ -182,8 +230,8 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew, FunctionDecl *OperatorDelete, bool ShouldPassAlignment, bool UsualArrayDeleteWantsSize, ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens, - Optional<Expr *> ArraySize, - InitializationStyle InitializationStyle, + std::optional<Expr *> ArraySize, + CXXNewInitializationStyle InitializationStyle, Expr *Initializer, QualType Ty, TypeSourceInfo *AllocatedTypeInfo, SourceRange Range, SourceRange DirectInitRange) @@ -192,15 +240,17 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew, AllocatedTypeInfo(AllocatedTypeInfo), Range(Range), DirectInitRange(DirectInitRange) { - assert((Initializer != nullptr || InitializationStyle == NoInit) && - "Only NoInit can have no initializer!"); + assert((Initializer != nullptr || + InitializationStyle == CXXNewInitializationStyle::None) && + "Only CXXNewInitializationStyle::None can have no initializer!"); CXXNewExprBits.IsGlobalNew = IsGlobalNew; - CXXNewExprBits.IsArray = ArraySize.hasValue(); + CXXNewExprBits.IsArray = ArraySize.has_value(); CXXNewExprBits.ShouldPassAlignment = ShouldPassAlignment; CXXNewExprBits.UsualArrayDeleteWantsSize = UsualArrayDeleteWantsSize; + CXXNewExprBits.HasInitializer = Initializer != nullptr; CXXNewExprBits.StoredInitializationStyle = - Initializer ? 
InitializationStyle + 1 : 0; + llvm::to_underlying(InitializationStyle); bool IsParenTypeId = TypeIdParens.isValid(); CXXNewExprBits.IsParenTypeId = IsParenTypeId; CXXNewExprBits.NumPlacementArgs = PlacementArgs.size(); @@ -216,10 +266,10 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew, getTrailingObjects<SourceRange>()[0] = TypeIdParens; switch (getInitializationStyle()) { - case CallInit: + case CXXNewInitializationStyle::Parens: this->Range.setEnd(DirectInitRange.getEnd()); break; - case ListInit: + case CXXNewInitializationStyle::Braces: this->Range.setEnd(getInitializer()->getSourceRange().getEnd()); break; default: @@ -239,16 +289,15 @@ CXXNewExpr::CXXNewExpr(EmptyShell Empty, bool IsArray, CXXNewExprBits.IsParenTypeId = IsParenTypeId; } -CXXNewExpr * -CXXNewExpr::Create(const ASTContext &Ctx, bool IsGlobalNew, - FunctionDecl *OperatorNew, FunctionDecl *OperatorDelete, - bool ShouldPassAlignment, bool UsualArrayDeleteWantsSize, - ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens, - Optional<Expr *> ArraySize, - InitializationStyle InitializationStyle, Expr *Initializer, - QualType Ty, TypeSourceInfo *AllocatedTypeInfo, - SourceRange Range, SourceRange DirectInitRange) { - bool IsArray = ArraySize.hasValue(); +CXXNewExpr *CXXNewExpr::Create( + const ASTContext &Ctx, bool IsGlobalNew, FunctionDecl *OperatorNew, + FunctionDecl *OperatorDelete, bool ShouldPassAlignment, + bool UsualArrayDeleteWantsSize, ArrayRef<Expr *> PlacementArgs, + SourceRange TypeIdParens, std::optional<Expr *> ArraySize, + CXXNewInitializationStyle InitializationStyle, Expr *Initializer, + QualType Ty, TypeSourceInfo *AllocatedTypeInfo, SourceRange Range, + SourceRange DirectInitRange) { + bool IsArray = ArraySize.has_value(); bool HasInit = Initializer != nullptr; unsigned NumPlacementArgs = PlacementArgs.size(); bool IsParenTypeId = TypeIdParens.isValid(); @@ -275,6 +324,8 @@ CXXNewExpr *CXXNewExpr::CreateEmpty(const ASTContext &Ctx, bool IsArray, } bool CXXNewExpr::shouldNullCheckAllocation() const { + if (getOperatorNew()->getLangOpts().CheckNew) + return true; return !getOperatorNew()->hasAttr<ReturnsNonNullAttr>() && getOperatorNew() ->getType() @@ -314,7 +365,7 @@ QualType CXXDeleteExpr::getDestroyedType() const { // CXXPseudoDestructorExpr PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info) : Type(Info) { - Location = Info->getTypeLoc().getLocalSourceRange().getBegin(); + Location = Info->getTypeLoc().getBeginLoc(); } CXXPseudoDestructorExpr::CXXPseudoDestructorExpr( @@ -341,7 +392,7 @@ QualType CXXPseudoDestructorExpr::getDestroyedType() const { SourceLocation CXXPseudoDestructorExpr::getEndLoc() const { SourceLocation End = DestroyedType.getLocation(); if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo()) - End = TInfo->getTypeLoc().getLocalSourceRange().getEnd(); + End = TInfo->getTypeLoc().getSourceRange().getEnd(); return End; } @@ -349,15 +400,15 @@ SourceLocation CXXPseudoDestructorExpr::getEndLoc() const { UnresolvedLookupExpr::UnresolvedLookupExpr( const ASTContext &Context, CXXRecordDecl *NamingClass, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, - const DeclarationNameInfo &NameInfo, bool RequiresADL, bool Overloaded, + const DeclarationNameInfo &NameInfo, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin, - UnresolvedSetIterator End) + UnresolvedSetIterator End, bool KnownDependent, + bool KnownInstantiationDependent) : OverloadExpr(UnresolvedLookupExprClass, Context, 
QualifierLoc, - TemplateKWLoc, NameInfo, TemplateArgs, Begin, End, false, - false, false), + TemplateKWLoc, NameInfo, TemplateArgs, Begin, End, + KnownDependent, KnownInstantiationDependent, false), NamingClass(NamingClass) { UnresolvedLookupExprBits.RequiresADL = RequiresADL; - UnresolvedLookupExprBits.Overloaded = Overloaded; } UnresolvedLookupExpr::UnresolvedLookupExpr(EmptyShell Empty, @@ -369,15 +420,17 @@ UnresolvedLookupExpr::UnresolvedLookupExpr(EmptyShell Empty, UnresolvedLookupExpr *UnresolvedLookupExpr::Create( const ASTContext &Context, CXXRecordDecl *NamingClass, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, - bool RequiresADL, bool Overloaded, UnresolvedSetIterator Begin, - UnresolvedSetIterator End) { + bool RequiresADL, UnresolvedSetIterator Begin, UnresolvedSetIterator End, + bool KnownDependent, bool KnownInstantiationDependent) { unsigned NumResults = End - Begin; unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(NumResults, 0, 0); void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr)); - return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc, - SourceLocation(), NameInfo, RequiresADL, - Overloaded, nullptr, Begin, End); + return new (Mem) UnresolvedLookupExpr( + Context, NamingClass, QualifierLoc, + /*TemplateKWLoc=*/SourceLocation(), NameInfo, RequiresADL, + /*TemplateArgs=*/nullptr, Begin, End, KnownDependent, + KnownInstantiationDependent); } UnresolvedLookupExpr *UnresolvedLookupExpr::Create( @@ -385,17 +438,18 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create( NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool RequiresADL, const TemplateArgumentListInfo *Args, UnresolvedSetIterator Begin, - UnresolvedSetIterator End) { - assert(Args || TemplateKWLoc.isValid()); + UnresolvedSetIterator End, bool KnownDependent, + bool KnownInstantiationDependent) { unsigned NumResults = End - Begin; + bool HasTemplateKWAndArgsInfo = Args || TemplateKWLoc.isValid(); unsigned NumTemplateArgs = Args ? 
Args->size() : 0; - unsigned Size = - totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo, - TemplateArgumentLoc>(NumResults, 1, NumTemplateArgs); + unsigned Size = totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo, + TemplateArgumentLoc>( + NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs); void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr)); - return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc, - TemplateKWLoc, NameInfo, RequiresADL, - /*Overloaded*/ true, Args, Begin, End); + return new (Mem) UnresolvedLookupExpr( + Context, NamingClass, QualifierLoc, TemplateKWLoc, NameInfo, RequiresADL, + Args, Begin, End, KnownDependent, KnownInstantiationDependent); } UnresolvedLookupExpr *UnresolvedLookupExpr::CreateEmpty( @@ -507,14 +561,14 @@ DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &Context, } SourceLocation CXXConstructExpr::getBeginLoc() const { - if (isa<CXXTemporaryObjectExpr>(this)) - return cast<CXXTemporaryObjectExpr>(this)->getBeginLoc(); + if (const auto *TOE = dyn_cast<CXXTemporaryObjectExpr>(this)) + return TOE->getBeginLoc(); return getLocation(); } SourceLocation CXXConstructExpr::getEndLoc() const { - if (isa<CXXTemporaryObjectExpr>(this)) - return cast<CXXTemporaryObjectExpr>(this)->getEndLoc(); + if (const auto *TOE = dyn_cast<CXXTemporaryObjectExpr>(this)) + return TOE->getEndLoc(); if (ParenOrBraceRange.isValid()) return ParenOrBraceRange.getEnd(); @@ -764,29 +818,35 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C, /// struct C { }; /// /// C *f(B* b) { return dynamic_cast<C*>(b); } -bool CXXDynamicCastExpr::isAlwaysNull() const -{ +bool CXXDynamicCastExpr::isAlwaysNull() const { + if (isValueDependent() || getCastKind() != CK_Dynamic) + return false; + QualType SrcType = getSubExpr()->getType(); QualType DestType = getType(); - if (const auto *SrcPTy = SrcType->getAs<PointerType>()) { - SrcType = SrcPTy->getPointeeType(); - DestType = DestType->castAs<PointerType>()->getPointeeType(); - } - - if (DestType->isVoidType()) + if (DestType->isVoidPointerType()) return false; - const auto *SrcRD = - cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl()); + if (DestType->isPointerType()) { + SrcType = SrcType->getPointeeType(); + DestType = DestType->getPointeeType(); + } - if (!SrcRD->hasAttr<FinalAttr>()) - return false; + const auto *SrcRD = SrcType->getAsCXXRecordDecl(); + const auto *DestRD = DestType->getAsCXXRecordDecl(); + assert(SrcRD && DestRD); - const auto *DestRD = - cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl()); + if (SrcRD->isEffectivelyFinal()) { + assert(!SrcRD->isDerivedFrom(DestRD) && + "upcasts should not use CK_Dynamic"); + return true; + } + + if (DestRD->isEffectivelyFinal() && !DestRD->isDerivedFrom(SrcRD)) + return true; - return !DestRD->isDerivedFrom(SrcRD); + return false; } CXXReinterpretCastExpr * @@ -949,9 +1009,43 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const { return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier(); } +CXXDefaultArgExpr *CXXDefaultArgExpr::CreateEmpty(const ASTContext &C, + bool HasRewrittenInit) { + size_t Size = totalSizeToAlloc<Expr *>(HasRewrittenInit); + auto *Mem = C.Allocate(Size, alignof(CXXDefaultArgExpr)); + return new (Mem) CXXDefaultArgExpr(EmptyShell(), HasRewrittenInit); +} + +CXXDefaultArgExpr *CXXDefaultArgExpr::Create(const ASTContext &C, + SourceLocation Loc, + ParmVarDecl *Param, + Expr *RewrittenExpr, + DeclContext *UsedContext) { + size_t Size = 
totalSizeToAlloc<Expr *>(RewrittenExpr != nullptr); + auto *Mem = C.Allocate(Size, alignof(CXXDefaultArgExpr)); + return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param, + RewrittenExpr, UsedContext); +} + +Expr *CXXDefaultArgExpr::getExpr() { + return CXXDefaultArgExprBits.HasRewrittenInit ? getAdjustedRewrittenExpr() + : getParam()->getDefaultArg(); +} + +Expr *CXXDefaultArgExpr::getAdjustedRewrittenExpr() { + assert(hasRewrittenInit() && + "expected this CXXDefaultArgExpr to have a rewritten init."); + Expr *Init = getRewrittenExpr(); + if (auto *E = dyn_cast_if_present<FullExpr>(Init)) + if (!isa<ConstantExpr>(E)) + return E->getSubExpr(); + return Init; +} + CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx, SourceLocation Loc, FieldDecl *Field, - QualType Ty, DeclContext *UsedContext) + QualType Ty, DeclContext *UsedContext, + Expr *RewrittenInitExpr) : Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx), Ty->isLValueReferenceType() ? VK_LValue : Ty->isRValueReferenceType() ? VK_XValue @@ -959,11 +1053,43 @@ CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx, /*FIXME*/ OK_Ordinary), Field(Field), UsedContext(UsedContext) { CXXDefaultInitExprBits.Loc = Loc; + CXXDefaultInitExprBits.HasRewrittenInit = RewrittenInitExpr != nullptr; + + if (CXXDefaultInitExprBits.HasRewrittenInit) + *getTrailingObjects<Expr *>() = RewrittenInitExpr; + assert(Field->hasInClassInitializer()); setDependence(computeDependence(this)); } +CXXDefaultInitExpr *CXXDefaultInitExpr::CreateEmpty(const ASTContext &C, + bool HasRewrittenInit) { + size_t Size = totalSizeToAlloc<Expr *>(HasRewrittenInit); + auto *Mem = C.Allocate(Size, alignof(CXXDefaultInitExpr)); + return new (Mem) CXXDefaultInitExpr(EmptyShell(), HasRewrittenInit); +} + +CXXDefaultInitExpr *CXXDefaultInitExpr::Create(const ASTContext &Ctx, + SourceLocation Loc, + FieldDecl *Field, + DeclContext *UsedContext, + Expr *RewrittenInitExpr) { + + size_t Size = totalSizeToAlloc<Expr *>(RewrittenInitExpr != nullptr); + auto *Mem = Ctx.Allocate(Size, alignof(CXXDefaultInitExpr)); + return new (Mem) CXXDefaultInitExpr(Ctx, Loc, Field, Field->getType(), + UsedContext, RewrittenInitExpr); +} + +Expr *CXXDefaultInitExpr::getExpr() { + assert(Field->getInClassInitializer() && "initializer hasn't been parsed"); + if (hasRewrittenInit()) + return getRewrittenExpr(); + + return Field->getInClassInitializer(); +} + CXXTemporary *CXXTemporary::Create(const ASTContext &C, const CXXDestructorDecl *Destructor) { return new (C) CXXTemporary(Destructor); @@ -988,8 +1114,10 @@ CXXTemporaryObjectExpr::CXXTemporaryObjectExpr( CXXTemporaryObjectExprClass, Ty, TSI->getTypeLoc().getBeginLoc(), Cons, /* Elidable=*/false, Args, HadMultipleCandidates, ListInitialization, StdInitListInitialization, ZeroInitialization, - CXXConstructExpr::CK_Complete, ParenOrBraceRange), - TSI(TSI) {} + CXXConstructionKind::Complete, ParenOrBraceRange), + TSI(TSI) { + setDependence(computeDependence(this)); +} CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(EmptyShell Empty, unsigned NumArgs) @@ -1034,7 +1162,7 @@ CXXConstructExpr *CXXConstructExpr::Create( CXXConstructorDecl *Ctor, bool Elidable, ArrayRef<Expr *> Args, bool HadMultipleCandidates, bool ListInitialization, bool StdInitListInitialization, bool ZeroInitialization, - ConstructionKind ConstructKind, SourceRange ParenOrBraceRange) { + CXXConstructionKind ConstructKind, SourceRange ParenOrBraceRange) { unsigned SizeOfTrailingObjects = sizeOfTrailingObjects(Args.size()); void *Mem = 
Ctx.Allocate(sizeof(CXXConstructExpr) + SizeOfTrailingObjects, alignof(CXXConstructExpr)); @@ -1057,7 +1185,7 @@ CXXConstructExpr::CXXConstructExpr( StmtClass SC, QualType Ty, SourceLocation Loc, CXXConstructorDecl *Ctor, bool Elidable, ArrayRef<Expr *> Args, bool HadMultipleCandidates, bool ListInitialization, bool StdInitListInitialization, - bool ZeroInitialization, ConstructionKind ConstructKind, + bool ZeroInitialization, CXXConstructionKind ConstructKind, SourceRange ParenOrBraceRange) : Expr(SC, Ty, VK_PRValue, OK_Ordinary), Constructor(Ctor), ParenOrBraceRange(ParenOrBraceRange), NumArgs(Args.size()) { @@ -1066,7 +1194,8 @@ CXXConstructExpr::CXXConstructExpr( CXXConstructExprBits.ListInitialization = ListInitialization; CXXConstructExprBits.StdInitListInitialization = StdInitListInitialization; CXXConstructExprBits.ZeroInitialization = ZeroInitialization; - CXXConstructExprBits.ConstructionKind = ConstructKind; + CXXConstructExprBits.ConstructionKind = llvm::to_underlying(ConstructKind); + CXXConstructExprBits.IsImmediateEscalating = false; CXXConstructExprBits.Loc = Loc; Stmt **TrailingArgs = getTrailingArgs(); @@ -1075,7 +1204,9 @@ CXXConstructExpr::CXXConstructExpr( TrailingArgs[I] = Args[I]; } - setDependence(computeDependence(this)); + // CXXTemporaryObjectExpr does this itself after setting its TypeSourceInfo. + if (SC == CXXConstructExprClass) + setDependence(computeDependence(this)); } CXXConstructExpr::CXXConstructExpr(StmtClass SC, EmptyShell Empty, @@ -1083,7 +1214,7 @@ CXXConstructExpr::CXXConstructExpr(StmtClass SC, EmptyShell Empty, : Expr(SC, Empty), NumArgs(NumArgs) {} LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit, - LambdaCaptureKind Kind, VarDecl *Var, + LambdaCaptureKind Kind, ValueDecl *Var, SourceLocation EllipsisLoc) : DeclAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc) { unsigned Bits = 0; @@ -1093,7 +1224,7 @@ LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit, switch (Kind) { case LCK_StarThis: Bits |= Capture_ByCopy; - LLVM_FALLTHROUGH; + [[fallthrough]]; case LCK_This: assert(!Var && "'this' capture cannot have a variable!"); Bits |= Capture_This; @@ -1101,7 +1232,7 @@ LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit, case LCK_ByCopy: Bits |= Capture_ByCopy; - LLVM_FALLTHROUGH; + [[fallthrough]]; case LCK_ByRef: assert(Var && "capture must have a variable!"); break; @@ -1207,16 +1338,16 @@ const CompoundStmt *LambdaExpr::getCompoundStmtBody() const { } bool LambdaExpr::isInitCapture(const LambdaCapture *C) const { - return (C->capturesVariable() && C->getCapturedVar()->isInitCapture() && - (getCallOperator() == C->getCapturedVar()->getDeclContext())); + return C->capturesVariable() && C->getCapturedVar()->isInitCapture() && + getCallOperator() == C->getCapturedVar()->getDeclContext(); } LambdaExpr::capture_iterator LambdaExpr::capture_begin() const { - return getLambdaClass()->getLambdaData().Captures; + return getLambdaClass()->captures_begin(); } LambdaExpr::capture_iterator LambdaExpr::capture_end() const { - return capture_begin() + capture_size(); + return getLambdaClass()->captures_end(); } LambdaExpr::capture_range LambdaExpr::captures() const { @@ -1228,9 +1359,8 @@ LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const { } LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const { - struct CXXRecordDecl::LambdaDefinitionData &Data - = getLambdaClass()->getLambdaData(); - return Data.Captures + Data.NumExplicitCaptures; + return capture_begin() + + 
getLambdaClass()->getLambdaData().NumExplicitCaptures; } LambdaExpr::capture_range LambdaExpr::explicit_captures() const { @@ -1322,17 +1452,16 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, return new (buffer) ExprWithCleanups(empty, numObjects); } -CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T, - TypeSourceInfo *TSI, - SourceLocation LParenLoc, - ArrayRef<Expr *> Args, - SourceLocation RParenLoc) +CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr( + QualType T, TypeSourceInfo *TSI, SourceLocation LParenLoc, + ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool IsListInit) : Expr(CXXUnresolvedConstructExprClass, T, (TSI->getType()->isLValueReferenceType() ? VK_LValue : TSI->getType()->isRValueReferenceType() ? VK_XValue : VK_PRValue), OK_Ordinary), - TSI(TSI), LParenLoc(LParenLoc), RParenLoc(RParenLoc) { + TypeAndInitForm(TSI, IsListInit), LParenLoc(LParenLoc), + RParenLoc(RParenLoc) { CXXUnresolvedConstructExprBits.NumArgs = Args.size(); auto **StoredArgs = getTrailingObjects<Expr *>(); for (unsigned I = 0; I != Args.size(); ++I) @@ -1341,11 +1470,12 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T, } CXXUnresolvedConstructExpr *CXXUnresolvedConstructExpr::Create( - const ASTContext &Context, QualType T, TypeSourceInfo *TSI, SourceLocation LParenLoc, - ArrayRef<Expr *> Args, SourceLocation RParenLoc) { + const ASTContext &Context, QualType T, TypeSourceInfo *TSI, + SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, + bool IsListInit) { void *Mem = Context.Allocate(totalSizeToAlloc<Expr *>(Args.size())); - return new (Mem) - CXXUnresolvedConstructExpr(T, TSI, LParenLoc, Args, RParenLoc); + return new (Mem) CXXUnresolvedConstructExpr(T, TSI, LParenLoc, Args, + RParenLoc, IsListInit); } CXXUnresolvedConstructExpr * @@ -1356,7 +1486,7 @@ CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &Context, } SourceLocation CXXUnresolvedConstructExpr::getBeginLoc() const { - return TSI->getTypeLoc().getBeginLoc(); + return TypeAndInitForm.getPointer()->getTypeLoc().getBeginLoc(); } CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr( @@ -1436,6 +1566,16 @@ CXXDependentScopeMemberExpr *CXXDependentScopeMemberExpr::CreateEmpty( EmptyShell(), HasTemplateKWAndArgsInfo, HasFirstQualifierFoundInScope); } +CXXThisExpr *CXXThisExpr::Create(const ASTContext &Ctx, SourceLocation L, + QualType Ty, bool IsImplicit) { + return new (Ctx) CXXThisExpr(L, Ty, IsImplicit, + Ctx.getLangOpts().HLSL ? 
VK_LValue : VK_PRValue); +} + +CXXThisExpr *CXXThisExpr::CreateEmpty(const ASTContext &Ctx) { + return new (Ctx) CXXThisExpr(EmptyShell()); +} + static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin, UnresolvedSetIterator end) { do { @@ -1551,12 +1691,12 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() { return Record; } -SizeOfPackExpr * -SizeOfPackExpr::Create(ASTContext &Context, SourceLocation OperatorLoc, - NamedDecl *Pack, SourceLocation PackLoc, - SourceLocation RParenLoc, - Optional<unsigned> Length, - ArrayRef<TemplateArgument> PartialArgs) { +SizeOfPackExpr *SizeOfPackExpr::Create(ASTContext &Context, + SourceLocation OperatorLoc, + NamedDecl *Pack, SourceLocation PackLoc, + SourceLocation RParenLoc, + std::optional<unsigned> Length, + ArrayRef<TemplateArgument> PartialArgs) { void *Storage = Context.Allocate(totalSizeToAlloc<TemplateArgument>(PartialArgs.size())); return new (Storage) SizeOfPackExpr(Context.getSizeType(), OperatorLoc, Pack, @@ -1570,6 +1710,46 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context, return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs); } +NonTypeTemplateParmDecl *SubstNonTypeTemplateParmExpr::getParameter() const { + return cast<NonTypeTemplateParmDecl>( + getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]); +} + +PackIndexingExpr *PackIndexingExpr::Create( + ASTContext &Context, SourceLocation EllipsisLoc, SourceLocation RSquareLoc, + Expr *PackIdExpr, Expr *IndexExpr, std::optional<int64_t> Index, + ArrayRef<Expr *> SubstitutedExprs, bool ExpandedToEmptyPack) { + QualType Type; + if (Index && !SubstitutedExprs.empty()) + Type = SubstitutedExprs[*Index]->getType(); + else + Type = Context.DependentTy; + + void *Storage = + Context.Allocate(totalSizeToAlloc<Expr *>(SubstitutedExprs.size())); + return new (Storage) + PackIndexingExpr(Type, EllipsisLoc, RSquareLoc, PackIdExpr, IndexExpr, + SubstitutedExprs, ExpandedToEmptyPack); +} + +NamedDecl *PackIndexingExpr::getPackDecl() const { + if (auto *D = dyn_cast<DeclRefExpr>(getPackIdExpression()); D) { + NamedDecl *ND = dyn_cast<NamedDecl>(D->getDecl()); + assert(ND && "exected a named decl"); + return ND; + } + assert(false && "invalid declaration kind in pack indexing expression"); + return nullptr; +} + +PackIndexingExpr * +PackIndexingExpr::CreateDeserialized(ASTContext &Context, + unsigned NumTransformedExprs) { + void *Storage = + Context.Allocate(totalSizeToAlloc<Expr *>(NumTransformedExprs)); + return new (Storage) PackIndexingExpr(EmptyShell{}); +} + QualType SubstNonTypeTemplateParmExpr::getParameterType( const ASTContext &Context) const { // Note that, for a class type NTTP, we will have an lvalue of type 'const @@ -1580,17 +1760,24 @@ QualType SubstNonTypeTemplateParmExpr::getParameterType( } SubstNonTypeTemplateParmPackExpr::SubstNonTypeTemplateParmPackExpr( - QualType T, ExprValueKind ValueKind, NonTypeTemplateParmDecl *Param, - SourceLocation NameLoc, const TemplateArgument &ArgPack) + QualType T, ExprValueKind ValueKind, SourceLocation NameLoc, + const TemplateArgument &ArgPack, Decl *AssociatedDecl, unsigned Index) : Expr(SubstNonTypeTemplateParmPackExprClass, T, ValueKind, OK_Ordinary), - Param(Param), Arguments(ArgPack.pack_begin()), - NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { + AssociatedDecl(AssociatedDecl), Arguments(ArgPack.pack_begin()), + NumArguments(ArgPack.pack_size()), Index(Index), NameLoc(NameLoc) { + assert(AssociatedDecl != nullptr); 
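PackIndexingExpr, created just above, models C++26 pack indexing; a hedged sketch of the source forms involved, assuming a compiler that implements the feature:

    template <typename... Ts>
    using First = Ts...[0];                 // pack-index-specifier (type position)

    template <auto... Vs>
    constexpr auto FirstValue = Vs...[0];   // pack-index-expression (value position)

    static_assert(FirstValue<7, 8, 9> == 7);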
setDependence(ExprDependence::TypeValueInstantiation | ExprDependence::UnexpandedPack); } +NonTypeTemplateParmDecl * +SubstNonTypeTemplateParmPackExpr::getParameterPack() const { + return cast<NonTypeTemplateParmDecl>( + getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]); +} + TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const { - return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments)); + return TemplateArgument(llvm::ArrayRef(Arguments, NumArguments)); } FunctionParmPackExpr::FunctionParmPackExpr(QualType T, VarDecl *ParamPack, @@ -1742,3 +1929,21 @@ CUDAKernelCallExpr *CUDAKernelCallExpr::CreateEmpty(const ASTContext &Ctx, alignof(CUDAKernelCallExpr)); return new (Mem) CUDAKernelCallExpr(NumArgs, HasFPFeatures, Empty); } + +CXXParenListInitExpr * +CXXParenListInitExpr::Create(ASTContext &C, ArrayRef<Expr *> Args, QualType T, + unsigned NumUserSpecifiedExprs, + SourceLocation InitLoc, SourceLocation LParenLoc, + SourceLocation RParenLoc) { + void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Args.size())); + return new (Mem) CXXParenListInitExpr(Args, T, NumUserSpecifiedExprs, InitLoc, + LParenLoc, RParenLoc); +} + +CXXParenListInitExpr *CXXParenListInitExpr::CreateEmpty(ASTContext &C, + unsigned NumExprs, + EmptyShell Empty) { + void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumExprs), + alignof(CXXParenListInitExpr)); + return new (Mem) CXXParenListInitExpr(Empty, NumExprs); +} diff --git a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp index 6998e28fd2ea..6482cb6d39ac 100644 --- a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp +++ b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp @@ -145,7 +145,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { case Expr::FunctionParmPackExprClass: case Expr::MSPropertyRefExprClass: case Expr::MSPropertySubscriptExprClass: - case Expr::OMPArraySectionExprClass: + case Expr::ArraySectionExprClass: case Expr::OMPArrayShapingExprClass: case Expr::OMPIteratorExprClass: return Cl::CL_LValue; @@ -160,7 +160,6 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { case Expr::CXXPseudoDestructorExprClass: case Expr::UnaryExprOrTypeTraitExprClass: case Expr::CXXNewExprClass: - case Expr::CXXThisExprClass: case Expr::CXXNullPtrLiteralExprClass: case Expr::ImaginaryLiteralClass: case Expr::GNUNullExprClass: @@ -205,6 +204,15 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { case Expr::RequiresExprClass: return Cl::CL_PRValue; + case Expr::EmbedExprClass: + // Nominally, this just goes through as a PRValue until we actually expand + // it and check it. + return Cl::CL_PRValue; + + // Make HLSL this reference-like + case Expr::CXXThisExprClass: + return Lang.HLSL ? Cl::CL_LValue : Cl::CL_PRValue; + case Expr::ConstantExprClass: return ClassifyInternal(Ctx, cast<ConstantExpr>(E)->getSubExpr()); @@ -213,6 +221,14 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { return ClassifyInternal(Ctx, cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement()); + case Expr::PackIndexingExprClass: { + // A pack-index-expression always expands to an id-expression. + // Consider it as an LValue expression. + if (cast<PackIndexingExpr>(E)->isInstantiationDependent()) + return Cl::CL_LValue; + return ClassifyInternal(Ctx, cast<PackIndexingExpr>(E)->getSelectedExpr()); + } + // C, C++98 [expr.sub]p1: The result is an lvalue of type "T". 
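The subscript rule cited in that comment is easy to see in source form; a sketch assuming C++11 or later:

    void subscripts() {
      using A = int[3];
      A a = {1, 2, 3};
      int &lv = a[0];           // lvalue operand, lvalue result
      int &&xv = A{1, 2, 3}[0]; // prvalue array operand, xvalue result (DR1213)
      (void)lv; (void)xv;
    }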
// C++11 (DR1213): in the case of an array operand, the result is an lvalue // if that operand is an lvalue and an xvalue otherwise. @@ -442,6 +458,11 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { case Expr::SYCLUniqueStableNameExprClass: return Cl::CL_PRValue; break; + + case Expr::CXXParenListInitExprClass: + if (isa<ArrayType>(E->getType())) + return Cl::CL_ArrayTemporary; + return Cl::CL_ClassTemporary; } llvm_unreachable("unhandled expression kind in classification"); @@ -457,22 +478,24 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) { // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to // special-case this. - if (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) - return Cl::CL_MemberFunction; + if (const auto *M = dyn_cast<CXXMethodDecl>(D)) { + if (M->isImplicitObjectMemberFunction()) + return Cl::CL_MemberFunction; + if (M->isStatic()) + return Cl::CL_LValue; + return Cl::CL_PRValue; + } bool islvalue; if (const auto *NTTParm = dyn_cast<NonTypeTemplateParmDecl>(D)) islvalue = NTTParm->getType()->isReferenceType() || NTTParm->getType()->isRecordType(); else - islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) || - isa<IndirectFieldDecl>(D) || - isa<BindingDecl>(D) || - isa<MSGuidDecl>(D) || - isa<TemplateParamObjectDecl>(D) || - (Ctx.getLangOpts().CPlusPlus && - (isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) || - isa<FunctionTemplateDecl>(D))); + islvalue = + isa<VarDecl, FieldDecl, IndirectFieldDecl, BindingDecl, MSGuidDecl, + UnnamedGlobalConstantDecl, TemplateParamObjectDecl>(D) || + (Ctx.getLangOpts().CPlusPlus && + (isa<FunctionDecl, MSPropertyDecl, FunctionTemplateDecl>(D))); return islvalue ? Cl::CL_LValue : Cl::CL_PRValue; } @@ -546,8 +569,13 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) { // -- If it refers to a static member function [...], then E1.E2 is an // lvalue; [...] // -- Otherwise [...] E1.E2 is a prvalue. - if (const auto *Method = dyn_cast<CXXMethodDecl>(Member)) - return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction; + if (const auto *Method = dyn_cast<CXXMethodDecl>(Member)) { + if (Method->isStatic()) + return Cl::CL_LValue; + if (Method->isImplicitObjectMemberFunction()) + return Cl::CL_MemberFunction; + return Cl::CL_PRValue; + } // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue. // So is everything else we haven't handled yet. diff --git a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp index 8cb8625e2a1a..0704630c0fc2 100644 --- a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp +++ b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp @@ -31,75 +31,48 @@ using namespace clang; ConceptSpecializationExpr::ConceptSpecializationExpr( - const ASTContext &C, NestedNameSpecifierLoc NNS, - SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo, - NamedDecl *FoundDecl, ConceptDecl *NamedConcept, - const ASTTemplateArgumentListInfo *ArgsAsWritten, - ArrayRef<TemplateArgument> ConvertedArgs, + const ASTContext &C, ConceptReference *Loc, + ImplicitConceptSpecializationDecl *SpecDecl, const ConstraintSatisfaction *Satisfaction) : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_PRValue, OK_Ordinary), - ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo, FoundDecl, - NamedConcept, ArgsAsWritten), - NumTemplateArgs(ConvertedArgs.size()), + ConceptRef(Loc), SpecDecl(SpecDecl), Satisfaction(Satisfaction ? 
ASTConstraintSatisfaction::Create(C, *Satisfaction) : nullptr) { - setTemplateArguments(ConvertedArgs); setDependence(computeDependence(this, /*ValueDependent=*/!Satisfaction)); // Currently guaranteed by the fact concepts can only be at namespace-scope. - assert(!NestedNameSpec || - (!NestedNameSpec.getNestedNameSpecifier()->isInstantiationDependent() && - !NestedNameSpec.getNestedNameSpecifier() - ->containsUnexpandedParameterPack())); + assert(!Loc->getNestedNameSpecifierLoc() || + (!Loc->getNestedNameSpecifierLoc() + .getNestedNameSpecifier() + ->isInstantiationDependent() && + !Loc->getNestedNameSpecifierLoc() + .getNestedNameSpecifier() + ->containsUnexpandedParameterPack())); assert((!isValueDependent() || isInstantiationDependent()) && "should not be value-dependent"); } -ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty, - unsigned NumTemplateArgs) - : Expr(ConceptSpecializationExprClass, Empty), ConceptReference(), - NumTemplateArgs(NumTemplateArgs) { } - -void ConceptSpecializationExpr::setTemplateArguments( - ArrayRef<TemplateArgument> Converted) { - assert(Converted.size() == NumTemplateArgs); - std::uninitialized_copy(Converted.begin(), Converted.end(), - getTrailingObjects<TemplateArgument>()); -} +ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty) + : Expr(ConceptSpecializationExprClass, Empty) {} ConceptSpecializationExpr * -ConceptSpecializationExpr::Create(const ASTContext &C, - NestedNameSpecifierLoc NNS, - SourceLocation TemplateKWLoc, - DeclarationNameInfo ConceptNameInfo, - NamedDecl *FoundDecl, - ConceptDecl *NamedConcept, - const ASTTemplateArgumentListInfo *ArgsAsWritten, - ArrayRef<TemplateArgument> ConvertedArgs, +ConceptSpecializationExpr::Create(const ASTContext &C, ConceptReference *Loc, + ImplicitConceptSpecializationDecl *SpecDecl, const ConstraintSatisfaction *Satisfaction) { - void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>( - ConvertedArgs.size())); - return new (Buffer) ConceptSpecializationExpr(C, NNS, TemplateKWLoc, - ConceptNameInfo, FoundDecl, - NamedConcept, ArgsAsWritten, - ConvertedArgs, Satisfaction); + return new (C) ConceptSpecializationExpr(C, Loc, SpecDecl, Satisfaction); } ConceptSpecializationExpr::ConceptSpecializationExpr( - const ASTContext &C, ConceptDecl *NamedConcept, - ArrayRef<TemplateArgument> ConvertedArgs, + const ASTContext &C, ConceptReference *Loc, + ImplicitConceptSpecializationDecl *SpecDecl, const ConstraintSatisfaction *Satisfaction, bool Dependent, bool ContainsUnexpandedParameterPack) : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_PRValue, OK_Ordinary), - ConceptReference(NestedNameSpecifierLoc(), SourceLocation(), - DeclarationNameInfo(), NamedConcept, NamedConcept, - nullptr), - NumTemplateArgs(ConvertedArgs.size()), + ConceptRef(Loc), SpecDecl(SpecDecl), Satisfaction(Satisfaction ? 
ASTConstraintSatisfaction::Create(C, *Satisfaction) : nullptr) { - setTemplateArguments(ConvertedArgs); ExprDependence D = ExprDependence::None; if (!Satisfaction) D |= ExprDependence::Value; @@ -111,25 +84,14 @@ ConceptSpecializationExpr::ConceptSpecializationExpr( } ConceptSpecializationExpr * -ConceptSpecializationExpr::Create(const ASTContext &C, - ConceptDecl *NamedConcept, - ArrayRef<TemplateArgument> ConvertedArgs, +ConceptSpecializationExpr::Create(const ASTContext &C, ConceptReference *Loc, + ImplicitConceptSpecializationDecl *SpecDecl, const ConstraintSatisfaction *Satisfaction, bool Dependent, bool ContainsUnexpandedParameterPack) { - void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>( - ConvertedArgs.size())); - return new (Buffer) ConceptSpecializationExpr( - C, NamedConcept, ConvertedArgs, Satisfaction, Dependent, - ContainsUnexpandedParameterPack); -} - -ConceptSpecializationExpr * -ConceptSpecializationExpr::Create(ASTContext &C, EmptyShell Empty, - unsigned NumTemplateArgs) { - void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>( - NumTemplateArgs)); - return new (Buffer) ConceptSpecializationExpr(Empty, NumTemplateArgs); + return new (C) + ConceptSpecializationExpr(C, Loc, SpecDecl, Satisfaction, Dependent, + ContainsUnexpandedParameterPack); } const TypeConstraint * @@ -141,14 +103,29 @@ concepts::ExprRequirement::ReturnTypeRequirement::getTypeConstraint() const { ->getTypeConstraint(); } +// Search through the requirements, and see if any have a RecoveryExpr in it, +// which means this RequiresExpr ALSO needs to be invalid. +static bool RequirementContainsError(concepts::Requirement *R) { + if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(R)) + return ExprReq->getExpr() && ExprReq->getExpr()->containsErrors(); + + if (auto *NestedReq = dyn_cast<concepts::NestedRequirement>(R)) + return !NestedReq->hasInvalidConstraint() && + NestedReq->getConstraintExpr() && + NestedReq->getConstraintExpr()->containsErrors(); + return false; +} + RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc, - RequiresExprBodyDecl *Body, + RequiresExprBodyDecl *Body, SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters, + SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements, SourceLocation RBraceLoc) : Expr(RequiresExprClass, C.BoolTy, VK_PRValue, OK_Ordinary), NumLocalParameters(LocalParameters.size()), - NumRequirements(Requirements.size()), Body(Body), RBraceLoc(RBraceLoc) { + NumRequirements(Requirements.size()), Body(Body), LParenLoc(LParenLoc), + RParenLoc(RParenLoc), RBraceLoc(RBraceLoc) { RequiresExprBits.IsSatisfied = false; RequiresExprBits.RequiresKWLoc = RequiresKWLoc; bool Dependent = false; @@ -167,6 +144,9 @@ RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc, if (!RequiresExprBits.IsSatisfied) break; } + + if (RequirementContainsError(R)) + setDependence(getDependence() | ExprDependence::Error); } std::copy(LocalParameters.begin(), LocalParameters.end(), getTrailingObjects<ParmVarDecl *>()); @@ -190,18 +170,18 @@ RequiresExpr::RequiresExpr(ASTContext &C, EmptyShell Empty, : Expr(RequiresExprClass, Empty), NumLocalParameters(NumLocalParameters), NumRequirements(NumRequirements) { } -RequiresExpr * -RequiresExpr::Create(ASTContext &C, SourceLocation RequiresKWLoc, - RequiresExprBodyDecl *Body, - ArrayRef<ParmVarDecl *> LocalParameters, - ArrayRef<concepts::Requirement *> Requirements, - SourceLocation RBraceLoc) { +RequiresExpr *RequiresExpr::Create( + ASTContext &C, SourceLocation 
RequiresKWLoc, RequiresExprBodyDecl *Body, + SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters, + SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements, + SourceLocation RBraceLoc) { void *Mem = C.Allocate(totalSizeToAlloc<ParmVarDecl *, concepts::Requirement *>( LocalParameters.size(), Requirements.size()), alignof(RequiresExpr)); - return new (Mem) RequiresExpr(C, RequiresKWLoc, Body, LocalParameters, - Requirements, RBraceLoc); + return new (Mem) + RequiresExpr(C, RequiresKWLoc, Body, LParenLoc, LocalParameters, + RParenLoc, Requirements, RBraceLoc); } RequiresExpr * diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstShared.h b/contrib/llvm-project/clang/lib/AST/ExprConstShared.h new file mode 100644 index 000000000000..2a7088e4e371 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/ExprConstShared.h @@ -0,0 +1,69 @@ +//===--- ExprConstShared.h - Shared consetxpr functionality ----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Shared functionality between the new constant expression +// interpreter (AST/Interp/) and the current one (ExprConstant.cpp). +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_AST_EXPRCONSTSHARED_H +#define LLVM_CLANG_LIB_AST_EXPRCONSTSHARED_H + +namespace llvm { +class APFloat; +} +namespace clang { +class QualType; +class LangOptions; +} // namespace clang +using namespace clang; +/// Values returned by __builtin_classify_type, chosen to match the values +/// produced by GCC's builtin. +enum class GCCTypeClass { + None = -1, + Void = 0, + Integer = 1, + // GCC reserves 2 for character types, but instead classifies them as + // integers. + Enum = 3, + Bool = 4, + Pointer = 5, + // GCC reserves 6 for references, but appears to never use it (because + // expressions never have reference type, presumably). + PointerToDataMember = 7, + RealFloat = 8, + Complex = 9, + // GCC reserves 10 for functions, but does not use it since GCC version 6 due + // to decay to pointer. (Prior to version 6 it was only used in C++ mode). + // GCC claims to reserve 11 for pointers to member functions, but *actually* + // uses 12 for that purpose, same as for a class or struct. Maybe it + // internally implements a pointer to member as a struct? Who knows. + PointerToMemberFunction = 12, // Not a bug, see above. + ClassOrStruct = 12, + Union = 13, + // GCC reserves 14 for arrays, but does not use it since GCC version 6 due to + // decay to pointer. (Prior to version 6 it was only used in C++ mode). + // GCC reserves 15 for strings, but actually uses 5 (pointer) for string + // literals. 
+ // Lang = 16, + // OpaqueType = 17, + BitInt = 18, + Vector = 19 +}; + +GCCTypeClass EvaluateBuiltinClassifyType(QualType T, + const LangOptions &LangOpts); + +void HandleComplexComplexMul(llvm::APFloat A, llvm::APFloat B, llvm::APFloat C, + llvm::APFloat D, llvm::APFloat &ResR, + llvm::APFloat &ResI); +void HandleComplexComplexDiv(llvm::APFloat A, llvm::APFloat B, llvm::APFloat C, + llvm::APFloat D, llvm::APFloat &ResR, + llvm::APFloat &ResI); + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp index ba2865d66e0a..5e57b5e8bc8f 100644 --- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp +++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp @@ -32,6 +32,7 @@ // //===----------------------------------------------------------------------===// +#include "ExprConstShared.h" #include "Interp/Context.h" #include "Interp/Frame.h" #include "Interp/State.h" @@ -50,15 +51,19 @@ #include "clang/AST/StmtVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/Builtins.h" +#include "clang/Basic/DiagnosticSema.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/APFixedPoint.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallBitVector.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/Support/Debug.h" #include "llvm/Support/SaveAndRestore.h" +#include "llvm/Support/SipHash.h" +#include "llvm/Support/TimeProfiler.h" #include "llvm/Support/raw_ostream.h" #include <cstring> #include <functional> +#include <optional> #define DEBUG_TYPE "exprconstant" @@ -68,7 +73,6 @@ using llvm::APInt; using llvm::APSInt; using llvm::APFloat; using llvm::FixedPointSemantics; -using llvm::Optional; namespace { struct LValue; @@ -206,7 +210,7 @@ namespace { IsArray = true; if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) { - ArraySize = CAT->getSize().getZExtValue(); + ArraySize = CAT->getZExtSize(); } else { assert(I == 0 && "unexpected unsized array designator"); FirstEntryIsUnsizedArray = true; @@ -237,15 +241,19 @@ namespace { /// True if the subobject was named in a manner not supported by C++11. Such /// lvalues can still be folded, but they are not core constant expressions /// and we cannot perform lvalue-to-rvalue conversions on them. + LLVM_PREFERRED_TYPE(bool) unsigned Invalid : 1; /// Is this a pointer one past the end of an object? + LLVM_PREFERRED_TYPE(bool) unsigned IsOnePastTheEnd : 1; /// Indicator of whether the first entry is an unsized array. + LLVM_PREFERRED_TYPE(bool) unsigned FirstEntryIsAnUnsizedArray : 1; /// Indicator of whether the most-derived object is an array element. + LLVM_PREFERRED_TYPE(bool) unsigned MostDerivedIsArrayElement : 1; /// The length of the path to the most-derived object of which this is a @@ -394,7 +402,7 @@ namespace { // This is a most-derived object. MostDerivedType = CAT->getElementType(); MostDerivedIsArrayElement = true; - MostDerivedArraySize = CAT->getSize().getZExtValue(); + MostDerivedArraySize = CAT->getZExtSize(); MostDerivedPathLength = Entries.size(); } /// Update this designator to refer to the first element within the array of @@ -530,6 +538,9 @@ namespace { /// This - The binding for the this pointer in this call, if any. const LValue *This; + /// CallExpr - The syntactical structure of member function calls + const Expr *CallExpr; + /// Information on how to find the arguments to this call. Our arguments /// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a /// key and this value as the version. 
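The GCCTypeClass values in the new ExprConstShared.h header are exactly what __builtin_classify_type evaluates to. A minimal C++ sketch of that mapping, derived only from the enum comments above (the classifications are constant expressions in both GCC and Clang; C++17 static_assert form, illustrative type names):

struct S { int x; };
union U { int a; float b; };
enum E { A };

static_assert(__builtin_classify_type(1) == 1);              // Integer
static_assert(__builtin_classify_type(1.0) == 8);            // RealFloat
static_assert(__builtin_classify_type((int *)nullptr) == 5); // Pointer
static_assert(__builtin_classify_type(S{}) == 12);           // ClassOrStruct
static_assert(__builtin_classify_type(U{}) == 13);           // Union
static_assert(__builtin_classify_type(E{}) == 3);            // Enum (C++ mode; C classifies as Integer)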
@@ -546,8 +557,8 @@ namespace { /// Temporaries - Temporary lvalues materialized within this stack frame. MapTy Temporaries; - /// CallLoc - The location of the call expression for this call. - SourceLocation CallLoc; + /// CallRange - The source range of the call expression for this call. + SourceRange CallRange; /// Index - The call index of this call. unsigned Index; @@ -578,12 +589,12 @@ namespace { /// LambdaCaptureFields - Mapping from captured variables/this to /// corresponding data members in the closure class. - llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields; - FieldDecl *LambdaThisCaptureField; + llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields; + FieldDecl *LambdaThisCaptureField = nullptr; - CallStackFrame(EvalInfo &Info, SourceLocation CallLoc, + CallStackFrame(EvalInfo &Info, SourceRange CallRange, const FunctionDecl *Callee, const LValue *This, - CallRef Arguments); + const Expr *CallExpr, CallRef Arguments); ~CallStackFrame(); // Return the temporary for Key whose version number is Version. @@ -592,11 +603,6 @@ namespace { auto LB = Temporaries.lower_bound(KV); if (LB != Temporaries.end() && LB->first == KV) return &LB->second; - // Pair (Key,Version) wasn't found in the map. Check that no elements - // in the map have 'Key' as their key. - assert((LB == Temporaries.end() || LB->first.first != Key) && - (LB == Temporaries.begin() || std::prev(LB)->first.first != Key) && - "Element with key 'Key' found in map"); return nullptr; } @@ -627,10 +633,10 @@ namespace { /// Allocate storage for a parameter of a function call made in this frame. APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV); - void describe(llvm::raw_ostream &OS) override; + void describe(llvm::raw_ostream &OS) const override; Frame *getCaller() const override { return Caller; } - SourceLocation getCallLocation() const override { return CallLoc; } + SourceRange getCallRange() const override { return CallRange; } const FunctionDecl *getCallee() const override { return Callee; } bool isStdFunction() const { @@ -640,6 +646,10 @@ namespace { return false; } + /// Whether we're in a context where [[msvc::constexpr]] evaluation is + /// permitted. See MSConstexprDocs for description of permitted contexts. + bool CanEvalMSConstexpr = false; + private: APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T, ScopeKind Scope); @@ -660,6 +670,32 @@ namespace { CallStackFrame &Frame; const LValue *OldThis; }; + + // A shorthand time trace scope struct, prints source range, for example + // {"name":"EvaluateAsRValue","args":{"detail":"<test.cc:8:21, col:25>"}}} + class ExprTimeTraceScope { + public: + ExprTimeTraceScope(const Expr *E, const ASTContext &Ctx, StringRef Name) + : TimeScope(Name, [E, &Ctx] { + return E->getSourceRange().printToString(Ctx.getSourceManager()); + }) {} + + private: + llvm::TimeTraceScope TimeScope; + }; + + /// RAII object used to change the current ability of + /// [[msvc::constexpr]] evaulation. + struct MSConstexprContextRAII { + CallStackFrame &Frame; + bool OldValue; + explicit MSConstexprContextRAII(CallStackFrame &Frame, bool Value) + : Frame(Frame), OldValue(Frame.CanEvalMSConstexpr) { + Frame.CanEvalMSConstexpr = Value; + } + + ~MSConstexprContextRAII() { Frame.CanEvalMSConstexpr = OldValue; } + }; } static bool HandleDestruction(EvalInfo &Info, const Expr *E, @@ -917,10 +953,6 @@ namespace { /// fold (not just why it's not strictly a constant expression)? 
bool HasFoldFailureDiagnostic; - /// Whether or not we're in a context where the front end requires a - /// constant value. - bool InConstantContext; - /// Whether we're checking that an expression is a potential constant /// expression. If so, do not fail on constructs that could become constant /// later on (such as a use of an undefined global). @@ -973,16 +1005,19 @@ namespace { CallStackDepth(0), NextCallIndex(1), StepsLeft(C.getLangOpts().ConstexprStepLimit), EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp), - BottomFrame(*this, SourceLocation(), nullptr, nullptr, CallRef()), + BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr, + /*This=*/nullptr, + /*CallExpr=*/nullptr, CallRef()), EvaluatingDecl((const ValueDecl *)nullptr), EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false), - HasFoldFailureDiagnostic(false), InConstantContext(false), - EvalMode(Mode) {} + HasFoldFailureDiagnostic(false), EvalMode(Mode) {} ~EvalInfo() { discardCleanups(); } + ASTContext &getCtx() const override { return Ctx; } + void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value, EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) { EvaluatingDecl = Base; @@ -1007,6 +1042,34 @@ namespace { return false; } + bool CheckArraySize(SourceLocation Loc, unsigned BitWidth, + uint64_t ElemCount, bool Diag) { + // FIXME: GH63562 + // APValue stores array extents as unsigned, + // so anything that is greater that unsigned would overflow when + // constructing the array, we catch this here. + if (BitWidth > ConstantArrayType::getMaxSizeBits(Ctx) || + ElemCount > uint64_t(std::numeric_limits<unsigned>::max())) { + if (Diag) + FFDiag(Loc, diag::note_constexpr_new_too_large) << ElemCount; + return false; + } + + // FIXME: GH63562 + // Arrays allocate an APValue per element. + // We use the number of constexpr steps as a proxy for the maximum size + // of arrays to avoid exhausting the system resources, as initialization + // of each element is likely to take some number of steps anyway. + uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit; + if (ElemCount > Limit) { + if (Diag) + FFDiag(Loc, diag::note_constexpr_new_exceeds_limits) + << ElemCount << Limit; + return false; + } + return true; + } + std::pair<CallStackFrame *, unsigned> getCallFrameAndDepth(unsigned CallIndex) { assert(CallIndex && "no call index in getCallFrameAndDepth"); @@ -1034,8 +1097,8 @@ namespace { APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV); - Optional<DynAlloc*> lookupDynamicAlloc(DynamicAllocLValue DA) { - Optional<DynAlloc*> Result; + std::optional<DynAlloc *> lookupDynamicAlloc(DynamicAllocLValue DA) { + std::optional<DynAlloc *> Result; auto It = HeapAllocs.find(DA); if (It != HeapAllocs.end()) Result = &It->second; @@ -1084,14 +1147,10 @@ namespace { void performLifetimeExtension() { // Disable the cleanups for lifetime-extended temporaries. - CleanupStack.erase(std::remove_if(CleanupStack.begin(), - CleanupStack.end(), - [](Cleanup &C) { - return !C.isDestroyedAtEndOf( - ScopeKind::FullExpression); - }), - CleanupStack.end()); - } + llvm::erase_if(CleanupStack, [](Cleanup &C) { + return !C.isDestroyedAtEndOf(ScopeKind::FullExpression); + }); + } /// Throw away any remaining cleanups at the end of evaluation. 
If any /// cleanups would have had a side-effect, note that as an unmodeled @@ -1120,8 +1179,6 @@ namespace { Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; } - ASTContext &getCtx() const override { return Ctx; } - // If we have a prior diagnostic, it will be noting that the expression // isn't a constant expression. This diagnostic is more important, // unless we require this evaluation to produce a constant expression. @@ -1136,7 +1193,7 @@ namespace { if (!HasFoldFailureDiagnostic) break; // We've already failed to fold something. Keep that diagnostic. - LLVM_FALLTHROUGH; + [[fallthrough]]; case EM_ConstantExpression: case EM_ConstantExpressionUnevaluated: setActiveDiagnostic(false); @@ -1223,7 +1280,7 @@ namespace { /// (Foo(), 1) // use noteSideEffect /// (Foo() || true) // use noteSideEffect /// Foo() + 1 // use noteFailure - LLVM_NODISCARD bool noteFailure() { + [[nodiscard]] bool noteFailure() { // Failure when evaluating some expression often means there is some // subexpression whose evaluation was skipped. Therefore, (because we // don't track whether we skipped an expression when unwinding after an @@ -1295,7 +1352,7 @@ namespace { class SpeculativeEvaluationRAII { EvalInfo *Info = nullptr; Expr::EvalStatus OldStatus; - unsigned OldSpeculativeEvaluationDepth; + unsigned OldSpeculativeEvaluationDepth = 0; void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) { Info = Other.Info; @@ -1434,11 +1491,12 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info, setInvalid(); } -CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc, +CallStackFrame::CallStackFrame(EvalInfo &Info, SourceRange CallRange, const FunctionDecl *Callee, const LValue *This, - CallRef Call) + const Expr *CallExpr, CallRef Call) : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This), - Arguments(Call), CallLoc(CallLoc), Index(Info.NextCallIndex++) { + CallExpr(CallExpr), Arguments(Call), CallRange(CallRange), + Index(Info.NextCallIndex++) { Info.CurrentCall = this; ++Info.CallStackDepth; } @@ -1710,8 +1768,8 @@ namespace { struct MemberPtr { MemberPtr() {} - explicit MemberPtr(const ValueDecl *Decl) : - DeclAndIsDerivedMember(Decl, false), Path() {} + explicit MemberPtr(const ValueDecl *Decl) + : DeclAndIsDerivedMember(Decl, false) {} /// The member or (direct or indirect) field referred to by this member /// pointer, or 0 if this is a null member pointer. @@ -1826,6 +1884,9 @@ static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info); static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result, EvalInfo &Info); static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result); +static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result, + EvalInfo &Info, + std::string *StringResult = nullptr); /// Evaluate an integer or fixed point expression into an APResult. static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result, @@ -1906,25 +1967,46 @@ APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) { } /// Produce a string describing the given constexpr call. 
-void CallStackFrame::describe(raw_ostream &Out) { +void CallStackFrame::describe(raw_ostream &Out) const { unsigned ArgIndex = 0; - bool IsMemberCall = isa<CXXMethodDecl>(Callee) && - !isa<CXXConstructorDecl>(Callee) && - cast<CXXMethodDecl>(Callee)->isInstance(); + bool IsMemberCall = + isa<CXXMethodDecl>(Callee) && !isa<CXXConstructorDecl>(Callee) && + cast<CXXMethodDecl>(Callee)->isImplicitObjectMemberFunction(); if (!IsMemberCall) - Out << *Callee << '('; + Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(), + /*Qualified=*/false); if (This && IsMemberCall) { - APValue Val; - This->moveInto(Val); - Val.printPretty(Out, Info.Ctx, - This->Designator.MostDerivedType); - // FIXME: Add parens around Val if needed. - Out << "->" << *Callee << '('; + if (const auto *MCE = dyn_cast_if_present<CXXMemberCallExpr>(CallExpr)) { + const Expr *Object = MCE->getImplicitObjectArgument(); + Object->printPretty(Out, /*Helper=*/nullptr, Info.Ctx.getPrintingPolicy(), + /*Indentation=*/0); + if (Object->getType()->isPointerType()) + Out << "->"; + else + Out << "."; + } else if (const auto *OCE = + dyn_cast_if_present<CXXOperatorCallExpr>(CallExpr)) { + OCE->getArg(0)->printPretty(Out, /*Helper=*/nullptr, + Info.Ctx.getPrintingPolicy(), + /*Indentation=*/0); + Out << "."; + } else { + APValue Val; + This->moveInto(Val); + Val.printPretty( + Out, Info.Ctx, + Info.Ctx.getLValueReferenceType(This->Designator.MostDerivedType)); + Out << "."; + } + Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(), + /*Qualified=*/false); IsMemberCall = false; } + Out << '('; + for (FunctionDecl::param_const_iterator I = Callee->param_begin(), E = Callee->param_end(); I != E; ++I, ++ArgIndex) { if (ArgIndex > (unsigned)IsMemberCall) @@ -1956,11 +2038,13 @@ static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) { return true; } -/// Should this call expression be treated as a string literal? -static bool IsStringLiteralCall(const CallExpr *E) { +/// Should this call expression be treated as a no-op? +static bool IsNoOpCall(const CallExpr *E) { unsigned Builtin = E->getBuiltinCallee(); return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString || - Builtin == Builtin::BI__builtin___NSStringMakeConstantString); + Builtin == Builtin::BI__builtin___NSStringMakeConstantString || + Builtin == Builtin::BI__builtin_ptrauth_sign_constant || + Builtin == Builtin::BI__builtin_function_start); } static bool IsGlobalLValue(APValue::LValueBase B) { @@ -1969,7 +2053,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) { // ... a null pointer value, or a prvalue core constant expression of type // std::nullptr_t. - if (!B) return true; + if (!B) + return true; if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) { // ... the address of an object with static storage duration, @@ -1979,7 +2064,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) { return true; // ... the address of a function, // ... the address of a GUID [MS extension], - return isa<FunctionDecl>(D) || isa<MSGuidDecl>(D); + // ... 
the address of an unnamed global constant + return isa<FunctionDecl, MSGuidDecl, UnnamedGlobalConstantDecl>(D); } if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>()) @@ -2006,7 +2092,7 @@ static bool IsGlobalLValue(APValue::LValueBase B) { case Expr::ObjCBoxedExprClass: return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer(); case Expr::CallExprClass: - return IsStringLiteralCall(cast<CallExpr>(E)); + return IsNoOpCall(cast<CallExpr>(E)); // For GCC compatibility, &&label has static storage duration. case Expr::AddrLabelExprClass: return true; @@ -2014,6 +2100,10 @@ static bool IsGlobalLValue(APValue::LValueBase B) { // Block variables at global or local static scope. case Expr::BlockExprClass: return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures(); + // The APValue generated from a __builtin_source_location will be emitted as a + // literal. + case Expr::SourceLocExprClass: + return true; case Expr::ImplicitValueInitExprClass: // FIXME: // We can never form an lvalue with an implicit value initialization as its @@ -2043,7 +2133,7 @@ static bool IsWeakLValue(const LValue &Value) { static bool isZeroSized(const LValue &Value) { const ValueDecl *Decl = GetLValueBaseDecl(Value); - if (Decl && isa<VarDecl>(Decl)) { + if (isa_and_nonnull<VarDecl>(Decl)) { QualType Ty = Decl->getType(); if (Ty->isArrayType()) return Ty->isIncompleteType() || @@ -2091,10 +2181,11 @@ static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) { Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here); else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) { // FIXME: Produce a note for dangling pointers too. - if (Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA)) + if (std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA)) Info.Note((*Alloc)->AllocExpr->getExprLoc(), diag::note_constexpr_dynamic_alloc_here); } + // We have no information to show for a typeid(T) object. } @@ -2112,7 +2203,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK, EvalInfo &Info, SourceLocation DiagLoc, QualType Type, const APValue &Value, ConstantExprKind Kind, - SourceLocation SubobjectLoc, + const FieldDecl *SubobjectDecl, CheckedTemporaries &CheckedTemps); /// Check that this reference or pointer core constant expression is a valid @@ -2156,13 +2247,12 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc, } } - if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD)) { - if (FD->isConsteval()) { - Info.FFDiag(Loc, diag::note_consteval_address_accessible) - << !Type->isAnyPointerType(); - Info.Note(FD->getLocation(), diag::note_declared_at); - return false; - } + if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD); + FD && FD->isImmediateFunction()) { + Info.FFDiag(Loc, diag::note_consteval_address_accessible) + << !Type->isAnyPointerType(); + Info.Note(FD->getLocation(), diag::note_declared_at); + return false; } // Check that the object is a global. Note that the fake 'this' object we @@ -2170,12 +2260,10 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc, // assumed to be global here. 
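Two of the additions in this hunk are easiest to see from the user's side: a __builtin_source_location result (what std::source_location::current() lowers to) is now treated as a global lvalue and may appear in constant initializers, while the address of an immediate (consteval) function must not escape constant evaluation. A hedged C++20 sketch, not a test from the tree:

#include <source_location>

constexpr std::source_location Where = std::source_location::current();
static_assert(Where.line() > 0);      // OK: the APValue is emitted as a literal

consteval int square(int x) { return x * x; }
constexpr int ok = square(4);         // OK: fully evaluated here
// constexpr auto *fp = &square;      // error: address of a consteval function
//                                    // is not permitted in a constant expression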
if (!IsGlobalLValue(Base)) { if (Info.getLangOpts().CPlusPlus11) { - const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>(); Info.FFDiag(Loc, diag::note_constexpr_non_global, 1) - << IsReferenceType << !Designator.Entries.empty() - << !!VD << VD; - - auto *VarD = dyn_cast_or_null<VarDecl>(VD); + << IsReferenceType << !Designator.Entries.empty() << !!BaseVD + << BaseVD; + auto *VarD = dyn_cast_or_null<VarDecl>(BaseVD); if (VarD && VarD->isConstexpr()) { // Non-static local constexpr variables have unintuitive semantics: // constexpr int a = 1; @@ -2217,6 +2305,19 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc, if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>()) // FIXME: Diagnostic! return false; + + // In CUDA/HIP device compilation, only device side variables have + // constant addresses. + if (Info.getCtx().getLangOpts().CUDA && + Info.getCtx().getLangOpts().CUDAIsDevice && + Info.getCtx().CUDAConstantEvalCtx.NoWrongSidedVars) { + if ((!Var->hasAttr<CUDADeviceAttr>() && + !Var->hasAttr<CUDAConstantAttr>() && + !Var->getType()->isCUDADeviceBuiltinSurfaceType() && + !Var->getType()->isCUDADeviceBuiltinTextureType()) || + Var->hasAttr<HIPManagedAttr>()) + return false; + } } if (const auto *FD = dyn_cast<const FunctionDecl>(BaseVD)) { // __declspec(dllimport) must be handled very carefully: @@ -2248,8 +2349,8 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc, APValue *V = MTE->getOrCreateValue(false); assert(V && "evasluation result refers to uninitialised temporary"); if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression, - Info, MTE->getExprLoc(), TempType, *V, - Kind, SourceLocation(), CheckedTemps)) + Info, MTE->getExprLoc(), TempType, *V, Kind, + /*SubobjectDecl=*/nullptr, CheckedTemps)) return false; } } @@ -2287,7 +2388,7 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info, const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member); if (!FD) return true; - if (FD->isConsteval()) { + if (FD->isImmediateFunction()) { Info.FFDiag(Loc, diag::note_consteval_address_accessible) << /*pointer*/ 0; Info.Note(FD->getLocation(), diag::note_declared_at); return false; @@ -2332,13 +2433,18 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK, EvalInfo &Info, SourceLocation DiagLoc, QualType Type, const APValue &Value, ConstantExprKind Kind, - SourceLocation SubobjectLoc, + const FieldDecl *SubobjectDecl, CheckedTemporaries &CheckedTemps) { if (!Value.hasValue()) { - Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized) - << true << Type; - if (SubobjectLoc.isValid()) - Info.Note(SubobjectLoc, diag::note_constexpr_subobject_declared_here); + if (SubobjectDecl) { + Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized) + << /*(name)*/ 1 << SubobjectDecl; + Info.Note(SubobjectDecl->getLocation(), + diag::note_constexpr_subobject_declared_here); + } else { + Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized) + << /*of type*/ 0 << Type; + } return false; } @@ -2355,40 +2461,46 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK, for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) { if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy, Value.getArrayInitializedElt(I), Kind, - SubobjectLoc, CheckedTemps)) + SubobjectDecl, CheckedTemps)) return false; } if (!Value.hasArrayFiller()) return true; return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy, - Value.getArrayFiller(), Kind, SubobjectLoc, + Value.getArrayFiller(), Kind, 
SubobjectDecl, CheckedTemps); } if (Value.isUnion() && Value.getUnionField()) { return CheckEvaluationResult( CERK, Info, DiagLoc, Value.getUnionField()->getType(), - Value.getUnionValue(), Kind, Value.getUnionField()->getLocation(), - CheckedTemps); + Value.getUnionValue(), Kind, Value.getUnionField(), CheckedTemps); } if (Value.isStruct()) { RecordDecl *RD = Type->castAs<RecordType>()->getDecl(); if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) { unsigned BaseIndex = 0; for (const CXXBaseSpecifier &BS : CD->bases()) { - if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(), - Value.getStructBase(BaseIndex), Kind, - BS.getBeginLoc(), CheckedTemps)) + const APValue &BaseValue = Value.getStructBase(BaseIndex); + if (!BaseValue.hasValue()) { + SourceLocation TypeBeginLoc = BS.getBaseTypeLoc(); + Info.FFDiag(TypeBeginLoc, diag::note_constexpr_uninitialized_base) + << BS.getType() << SourceRange(TypeBeginLoc, BS.getEndLoc()); + return false; + } + if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(), BaseValue, + Kind, /*SubobjectDecl=*/nullptr, + CheckedTemps)) return false; ++BaseIndex; } } for (const auto *I : RD->fields()) { - if (I->isUnnamedBitfield()) + if (I->isUnnamedBitField()) continue; if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(), - Value.getStructField(I->getFieldIndex()), - Kind, I->getLocation(), CheckedTemps)) + Value.getStructField(I->getFieldIndex()), Kind, + I, CheckedTemps)) return false; } } @@ -2422,7 +2534,7 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, CheckedTemporaries CheckedTemps; return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression, Info, DiagLoc, Type, Value, Kind, - SourceLocation(), CheckedTemps); + /*SubobjectDecl=*/nullptr, CheckedTemps); } /// Check that this evaluated value is fully-initialized and can be loaded by @@ -2432,7 +2544,7 @@ static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc, CheckedTemporaries CheckedTemps; return CheckEvaluationResult( CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value, - ConstantExprKind::Normal, SourceLocation(), CheckedTemps); + ConstantExprKind::Normal, /*SubobjectDecl=*/nullptr, CheckedTemps); } /// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless @@ -2453,6 +2565,7 @@ static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) { // A null base expression indicates a null pointer. These are always // evaluatable, and they are false unless the offset is zero. if (!Value.getLValueBase()) { + // TODO: Should a non-null pointer with an offset of zero evaluate to true? Result = !Value.getLValueOffset().isZero(); return true; } @@ -2465,6 +2578,7 @@ static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) { } static bool HandleConversionToBool(const APValue &Val, bool &Result) { + // TODO: This function should produce notes if it fails. switch (Val.getKind()) { case APValue::None: case APValue::Indeterminate: @@ -2489,6 +2603,9 @@ static bool HandleConversionToBool(const APValue &Val, bool &Result) { case APValue::LValue: return EvalPointerValueAsBool(Val, Result); case APValue::MemberPointer: + if (Val.getMemberPointerDecl() && Val.getMemberPointerDecl()->isWeak()) { + return false; + } Result = Val.getMemberPointerDecl(); return true; case APValue::Vector: @@ -2535,18 +2652,15 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E, return true; } -/// Get rounding mode used for evaluation of the specified expression. 
-/// \param[out] DynamicRM Is set to true is the requested rounding mode is -/// dynamic. +/// Get rounding mode to use in evaluation of the specified expression. +/// /// If rounding mode is unknown at compile time, still try to evaluate the /// expression. If the result is exact, it does not depend on rounding mode. /// So return "tonearest" mode instead of "dynamic". -static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E, - bool &DynamicRM) { +static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E) { llvm::RoundingMode RM = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).getRoundingMode(); - DynamicRM = (RM == llvm::RoundingMode::Dynamic); - if (DynamicRM) + if (RM == llvm::RoundingMode::Dynamic) RM = llvm::RoundingMode::NearestTiesToEven; return RM; } @@ -2570,14 +2684,14 @@ static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E, if ((St != APFloat::opOK) && (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic || - FPO.getFPExceptionMode() != LangOptions::FPE_Ignore || + FPO.getExceptionMode() != LangOptions::FPE_Ignore || FPO.getAllowFEnvAccess())) { Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict); return false; } if ((St & APFloat::opStatus::opInvalidOp) && - FPO.getFPExceptionMode() != LangOptions::FPE_Ignore) { + FPO.getExceptionMode() != LangOptions::FPE_Ignore) { // There is no usefully definable result. Info.FFDiag(E); return false; @@ -2595,9 +2709,12 @@ static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E, static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E, QualType SrcType, QualType DestType, APFloat &Result) { - assert(isa<CastExpr>(E) || isa<CompoundAssignOperator>(E)); - bool DynamicRM; - llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM); + assert((isa<CastExpr>(E) || isa<CompoundAssignOperator>(E) || + isa<ConvertVectorExpr>(E)) && + "HandleFloatToFloatCast has been checked with only CastExpr, " + "CompoundAssignOperator and ConvertVectorExpr. 
Please either validate " + "the new expression or address the root cause of this usage."); + llvm::RoundingMode RM = getActiveRoundingMode(Info, E); APFloat::opStatus St; APFloat Value = Result; bool ignored; @@ -2623,14 +2740,9 @@ static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E, QualType SrcType, const APSInt &Value, QualType DestType, APFloat &Result) { Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1); - APFloat::opStatus St = Result.convertFromAPInt(Value, Value.isSigned(), - APFloat::rmNearestTiesToEven); - if (!Info.InConstantContext && St != llvm::APFloatBase::opOK && - FPO.isFPConstrained()) { - Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict); - return false; - } - return true; + llvm::RoundingMode RM = getActiveRoundingMode(Info, E); + APFloat::opStatus St = Result.convertFromAPInt(Value, Value.isSigned(), RM); + return checkFloatingPointResult(Info, E, St); } static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E, @@ -2654,53 +2766,6 @@ static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E, return true; } -static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E, - llvm::APInt &Res) { - APValue SVal; - if (!Evaluate(SVal, Info, E)) - return false; - if (SVal.isInt()) { - Res = SVal.getInt(); - return true; - } - if (SVal.isFloat()) { - Res = SVal.getFloat().bitcastToAPInt(); - return true; - } - if (SVal.isVector()) { - QualType VecTy = E->getType(); - unsigned VecSize = Info.Ctx.getTypeSize(VecTy); - QualType EltTy = VecTy->castAs<VectorType>()->getElementType(); - unsigned EltSize = Info.Ctx.getTypeSize(EltTy); - bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); - Res = llvm::APInt::getNullValue(VecSize); - for (unsigned i = 0; i < SVal.getVectorLength(); i++) { - APValue &Elt = SVal.getVectorElt(i); - llvm::APInt EltAsInt; - if (Elt.isInt()) { - EltAsInt = Elt.getInt(); - } else if (Elt.isFloat()) { - EltAsInt = Elt.getFloat().bitcastToAPInt(); - } else { - // Don't try to handle vectors of anything other than int or float - // (not sure if it's possible to hit this case). - Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); - return false; - } - unsigned BaseEltSize = EltAsInt.getBitWidth(); - if (BigEndian) - Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize); - else - Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize); - } - return true; - } - // Give up if the input isn't an int, float, or vector. For example, we - // reject "(v4i16)(intptr_t)&a". - Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr); - return false; -} - /// Perform the given integer operation, which is known to need at most BitWidth /// bits, and check for overflow in the original type (if that type was not an /// unsigned type). @@ -2720,16 +2785,19 @@ static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E, if (Info.checkingForUndefinedBehavior()) Info.Ctx.getDiagnostics().Report(E->getExprLoc(), diag::warn_integer_constant_overflow) - << toString(Result, 10) << E->getType(); + << toString(Result, 10, Result.isSigned(), /*formatAsCLiteral=*/false, + /*UpperCase=*/true, /*InsertSeparators=*/true) + << E->getType() << E->getSourceRange(); return HandleOverflow(Info, E, Value, E->getType()); } return true; } /// Perform the given binary integer operation. 
-static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, - BinaryOperatorKind Opcode, APSInt RHS, - APSInt &Result) { +static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E, + const APSInt &LHS, BinaryOperatorKind Opcode, + APSInt RHS, APSInt &Result) { + bool HandleOverflowResult = true; switch (Opcode) { default: Info.FFDiag(E); @@ -2749,17 +2817,18 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, case BO_Div: case BO_Rem: if (RHS == 0) { - Info.FFDiag(E, diag::note_expr_divide_by_zero); + Info.FFDiag(E, diag::note_expr_divide_by_zero) + << E->getRHS()->getSourceRange(); return false; } - Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS); // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports // this operation and gives the two's complement result. - if (RHS.isNegative() && RHS.isAllOnesValue() && - LHS.isSigned() && LHS.isMinSignedValue()) - return HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), - E->getType()); - return true; + if (RHS.isNegative() && RHS.isAllOnes() && LHS.isSigned() && + LHS.isMinSignedValue()) + HandleOverflowResult = HandleOverflow( + Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType()); + Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS); + return HandleOverflowResult; case BO_Shl: { if (Info.getLangOpts().OpenCL) // OpenCL 6.3j: shift values are effectively % word size of LHS. @@ -2770,6 +2839,8 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, // During constant-folding, a negative shift is an opposite shift. Such // a shift is not a constant expression. Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS; + if (!Info.noteUndefinedBehavior()) + return false; RHS = -RHS; goto shift_right; } @@ -2780,15 +2851,22 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, if (SA != RHS) { Info.CCEDiag(E, diag::note_constexpr_large_shift) << RHS << E->getType() << LHS.getBitWidth(); + if (!Info.noteUndefinedBehavior()) + return false; } else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) { // C++11 [expr.shift]p2: A signed left shift must have a non-negative // operand, and must not overflow the corresponding unsigned type. // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to // E1 x 2^E2 module 2^N. - if (LHS.isNegative()) + if (LHS.isNegative()) { Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS; - else if (LHS.countLeadingZeros() < SA) + if (!Info.noteUndefinedBehavior()) + return false; + } else if (LHS.countl_zero() < SA) { Info.CCEDiag(E, diag::note_constexpr_lshift_discards); + if (!Info.noteUndefinedBehavior()) + return false; + } } Result = LHS << SA; return true; @@ -2803,6 +2881,8 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, // During constant-folding, a negative shift is an opposite shift. Such a // shift is not a constant expression. Info.CCEDiag(E, diag::note_constexpr_negative_shift) << RHS; + if (!Info.noteUndefinedBehavior()) + return false; RHS = -RHS; goto shift_left; } @@ -2810,9 +2890,13 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, // C++11 [expr.shift]p1: Shift width must be less than the bit width of the // shifted type. 
unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1); - if (SA != RHS) + if (SA != RHS) { Info.CCEDiag(E, diag::note_constexpr_large_shift) << RHS << E->getType() << LHS.getBitWidth(); + if (!Info.noteUndefinedBehavior()) + return false; + } + Result = LHS >> SA; return true; } @@ -2832,8 +2916,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS, static bool handleFloatFloatBinOp(EvalInfo &Info, const BinaryOperator *E, APFloat &LHS, BinaryOperatorKind Opcode, const APFloat &RHS) { - bool DynamicRM; - llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM); + llvm::RoundingMode RM = getActiveRoundingMode(Info, E); APFloat::opStatus St; switch (Opcode) { default: @@ -2933,6 +3016,11 @@ handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode, break; } + // The boolean operations on these vector types use an instruction that + // results in a mask of '-1' for the 'truth' value. Ensure that we negate 1 + // to -1 to make sure that we produce the correct value. + Result.negate(); + return true; } @@ -3142,9 +3230,14 @@ static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E, return true; } +enum class SizeOfType { + SizeOf, + DataSizeOf, +}; + /// Get the size of the given type in char units. -static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, - QualType Type, CharUnits &Size) { +static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, QualType Type, + CharUnits &Size, SizeOfType SOT = SizeOfType::SizeOf) { // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc // extension. if (Type->isVoidType() || Type->isFunctionType()) { @@ -3164,7 +3257,10 @@ static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, return false; } - Size = Info.Ctx.getTypeSizeInChars(Type); + if (SOT == SizeOfType::SizeOf) + Size = Info.Ctx.getTypeSizeInChars(Type); + else + Size = Info.Ctx.getTypeInfoDataSizeInChars(Type).Width; return true; } @@ -3273,6 +3369,9 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E, return false; } + if (E->isValueDependent()) + return false; + // Dig out the initializer, and use the declaration which it's attached to. // FIXME: We should eventually check whether the variable has a reachable // initializing declaration. @@ -3307,12 +3406,9 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E, // Check that we can fold the initializer. In C++, we will have already done // this in the cases where it matters for conformance. 
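The handleIntIntBinOp changes above mostly affect which notes are produced and whether evaluation may continue when only undefined behaviour is being checked. A sketch of the cases involved, assuming a 32-bit int; the commented-out lines are the ones rejected in a constant expression:

constexpr int ok_shift = 1 << 3;                // fine
// constexpr int neg_count = 1 << -1;           // note: negative shift count
// constexpr int wide = 1 << 40;                // note: shift count >= width of type
// constexpr int neg_lhs = -1 << 1;             // note before C++20; well defined in C++20
// constexpr int div0 = 10 / 0;                 // note: division by zero
// constexpr int ovf = (-2147483647 - 1) / -1;  // note: INT_MIN / -1 overflows
constexpr int rem = 7 % 3;                      // fine
static_assert(ok_shift == 8 && rem == 1);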
- SmallVector<PartialDiagnosticAt, 8> Notes; - if (!VD->evaluateValue(Notes)) { - Info.FFDiag(E, diag::note_constexpr_var_init_non_constant, - Notes.size() + 1) << VD; + if (!VD->evaluateValue()) { + Info.FFDiag(E, diag::note_constexpr_var_init_non_constant, 1) << VD; NoteLValueLocation(Info, Base); - Info.addNotes(Notes); return false; } @@ -3382,8 +3478,7 @@ static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit, assert(CAT && "string literal isn't an array"); QualType CharType = CAT->getElementType(); assert(CharType->isIntegerType() && "unexpected character type"); - - APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(), + APSInt Value(Info.Ctx.getTypeSize(CharType), CharType->isUnsignedIntegerType()); if (Index < S->getLength()) Value = S->getCodeUnit(Index); @@ -3403,10 +3498,10 @@ static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S, QualType CharType = CAT->getElementType(); assert(CharType->isIntegerType() && "unexpected character type"); - unsigned Elts = CAT->getSize().getZExtValue(); + unsigned Elts = CAT->getZExtSize(); Result = APValue(APValue::UninitArray(), std::min(S->getLength(), Elts), Elts); - APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(), + APSInt Value(Info.Ctx.getTypeSize(CharType), CharType->isUnsignedIntegerType()); if (Result.hasArrayFiller()) Result.getArrayFiller() = APValue(Value); @@ -3456,7 +3551,7 @@ static bool isReadByLvalueToRvalueConversion(const CXXRecordDecl *RD) { return false; for (auto *Field : RD->fields()) - if (!Field->isUnnamedBitfield() && + if (!Field->isUnnamedBitField() && isReadByLvalueToRvalueConversion(Field->getType())) return true; @@ -3542,6 +3637,14 @@ static bool lifetimeStartedInEvaluation(EvalInfo &Info, llvm_unreachable("unknown evaluating decl kind"); } +static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT, + SourceLocation CallLoc = {}) { + return Info.CheckArraySize( + CAT->getSizeExpr() ? CAT->getSizeExpr()->getBeginLoc() : CallLoc, + CAT->getNumAddressingBits(Info.Ctx), CAT->getZExtSize(), + /*Diag=*/true); +} + namespace { /// A handle to a complete object (an object that is not a subobject of /// another object). 
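The new CheckArraySize helper is what rejects absurdly large constexpr allocations up front, instead of materializing one APValue per element and exhausting memory. A C++20 sketch assuming the default -fconstexpr-steps limit; the thresholds are the ones described in the FIXME comments above, and the function name is illustrative:

#include <cstddef>

constexpr std::size_t zeros(std::size_t n) {
  int *p = new int[n]{};          // element count checked against the limits
  std::size_t sum = 0;
  for (std::size_t i = 0; i != n; ++i)
    sum += static_cast<std::size_t>(p[i]);
  delete[] p;
  return sum;
}

static_assert(zeros(16) == 0);                        // fine
// static_assert(zeros(std::size_t(1) << 33) == 0);   // note: allocation exceeds the
//                                                    // constexpr size/step limits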
@@ -3623,7 +3726,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj, !isValidIndeterminateAccess(handler.AccessKind))) { if (!Info.checkingPotentialConstantExpression()) Info.FFDiag(E, diag::note_constexpr_access_uninit) - << handler.AccessKind << O->isIndeterminate(); + << handler.AccessKind << O->isIndeterminate() + << E->getSourceRange(); return handler.failed(); } @@ -3633,9 +3737,9 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj, if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) && ObjType->isRecordType() && Info.isEvaluatingCtorDtor( - Obj.Base, llvm::makeArrayRef(Sub.Entries.begin(), - Sub.Entries.begin() + I)) != - ConstructionPhase::None) { + Obj.Base, + llvm::ArrayRef(Sub.Entries.begin(), Sub.Entries.begin() + I)) != + ConstructionPhase::None) { ObjType = Info.Ctx.getCanonicalType(ObjType); ObjType.removeLocalConst(); ObjType.removeLocalVolatile(); @@ -3716,6 +3820,9 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj, if (O->getArrayInitializedElts() > Index) O = &O->getArrayInitializedElt(Index); else if (!isRead(handler.AccessKind)) { + if (!CheckArraySize(Info, CAT, E->getExprLoc())) + return handler.failed(); + expandArray(*O, Index); O = &O->getArrayInitializedElt(Index); } else @@ -4010,6 +4117,16 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, return CompleteObject(LVal.Base, &V, GD->getType()); } + // Allow reading the APValue from an UnnamedGlobalConstantDecl. + if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D)) { + if (isModification(AK)) { + Info.FFDiag(E, diag::note_constexpr_modify_global); + return CompleteObject(); + } + return CompleteObject(LVal.Base, const_cast<APValue *>(&GCD->getValue()), + GCD->getType()); + } + // Allow reading from template parameter objects. if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) { if (isModification(AK)) { @@ -4038,6 +4155,10 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, } bool IsConstant = BaseType.isConstant(Info.Ctx); + bool ConstexprVar = false; + if (const auto *VD = dyn_cast_if_present<VarDecl>( + Info.EvaluatingDecl.dyn_cast<const ValueDecl *>())) + ConstexprVar = VD->isConstexpr(); // Unless we're looking at a local variable or argument in a constexpr call, // the variable we're reading must be const. @@ -4057,6 +4178,9 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, return CompleteObject(); } else if (VD->isConstexpr()) { // OK, we can read this variable. 
+ } else if (Info.getLangOpts().C23 && ConstexprVar) { + Info.FFDiag(E); + return CompleteObject(); } else if (BaseType->isIntegralOrEnumerationType()) { if (!IsConstant) { if (!IsAccess) @@ -4105,7 +4229,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, if (!evaluateVarDeclInit(Info, E, VD, Frame, LVal.getLValueVersion(), BaseVal)) return CompleteObject(); } else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) { - Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA); + std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA); if (!Alloc) { Info.FFDiag(E, diag::note_constexpr_access_deleted_object) << AK; return CompleteObject(); @@ -4230,9 +4354,33 @@ handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type, Info.FFDiag(Conv); return false; } + APValue Lit; if (!Evaluate(Lit, Info, CLE->getInitializer())) return false; + + // According to GCC info page: + // + // 6.28 Compound Literals + // + // As an optimization, G++ sometimes gives array compound literals longer + // lifetimes: when the array either appears outside a function or has a + // const-qualified type. If foo and its initializer had elements of type + // char *const rather than char *, or if foo were a global variable, the + // array would have static storage duration. But it is probably safest + // just to avoid the use of array compound literals in C++ code. + // + // Obey that rule by checking constness for converted array types. + + QualType CLETy = CLE->getType(); + if (CLETy->isArrayType() && !Type->isArrayType()) { + if (!CLETy.isConstant(Info.Ctx)) { + Info.FFDiag(Conv); + Info.Note(CLE->getExprLoc(), diag::note_declared_at); + return false; + } + } + CompleteObject LitObj(LVal.Base, &Lit, Base->getType()); return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal, AK); } else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) { @@ -4316,6 +4464,11 @@ struct CompoundAssignSubobjectHandler { return foundPointer(Subobj, SubobjType); case APValue::Vector: return foundVector(Subobj, SubobjType); + case APValue::Indeterminate: + Info.FFDiag(E, diag::note_constexpr_access_uninit) + << /*read of=*/0 << /*uninitialized object=*/1 + << E->getLHS()->getSourceRange(); + return false; default: // FIXME: can this happen? Info.FFDiag(E); @@ -4522,11 +4675,13 @@ struct IncDecSubobjectHandler { if (Old) *Old = APValue(Value); APFloat One(Value.getSemantics(), 1); + llvm::RoundingMode RM = getActiveRoundingMode(Info, E); + APFloat::opStatus St; if (AccessKind == AK_Increment) - Value.add(One, APFloat::rmNearestTiesToEven); + St = Value.add(One, RM); else - Value.subtract(One, APFloat::rmNearestTiesToEven); - return true; + St = Value.subtract(One, RM); + return checkFloatingPointResult(Info, E, St); } bool foundPointer(APValue &Subobj, QualType SubobjType) { if (!checkConst(SubobjType)) @@ -4580,6 +4735,9 @@ static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object, if (Object->getType()->isLiteralType(Info.Ctx)) return EvaluateTemporary(Object, This, Info); + if (Object->getType()->isRecordType() && Object->isPRValue()) + return EvaluateTemporary(Object, This, Info); + Info.FFDiag(Object, diag::note_constexpr_nonliteral) << Object->getType(); return false; } @@ -4735,8 +4893,13 @@ static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E, /// Get the value to use for a default-initialized object of type T. /// Return false if it encounters something invalid. 
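Taken together, the SubobjectDecl plumbing in the earlier CheckEvaluationResult hunks and the getDefaultInitValue/handleDefaultInitValue rename below change how uninitialized subobjects are reported: the note now names the field, or points at the base specifier, instead of only citing the enclosing type. A C++20 sketch with illustrative names; the diagnostic wording is paraphrased:

struct Base { int b; };
struct Widget : Base {
  int id;                 // no initializer
  int flags = 0;          // default member initializer
  constexpr Widget() {}   // C++20: may leave 'b' and 'id' indeterminate
};

constexpr int use() {
  Widget w;               // default-init; indeterminate members are allowed...
  w.b = 1;
  w.id = 2;               // ...as long as they are written before being read
  return w.id + w.flags;
}
static_assert(use() == 2);
// constexpr Widget W{};   // error: not fully initialized; the notes now name 'id'
//                         // and the uninitialized base 'Base'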
-static bool getDefaultInitValue(QualType T, APValue &Result) { +static bool handleDefaultInitValue(QualType T, APValue &Result) { bool Success = true; + + // If there is already a value present don't overwrite it. + if (!Result.isAbsent()) + return true; + if (auto *RD = T->getAsCXXRecordDecl()) { if (RD->isInvalidDecl()) { Result = APValue(); @@ -4753,23 +4916,24 @@ static bool getDefaultInitValue(QualType T, APValue &Result) { for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), End = RD->bases_end(); I != End; ++I, ++Index) - Success &= getDefaultInitValue(I->getType(), Result.getStructBase(Index)); + Success &= + handleDefaultInitValue(I->getType(), Result.getStructBase(Index)); for (const auto *I : RD->fields()) { - if (I->isUnnamedBitfield()) + if (I->isUnnamedBitField()) continue; - Success &= getDefaultInitValue(I->getType(), - Result.getStructField(I->getFieldIndex())); + Success &= handleDefaultInitValue( + I->getType(), Result.getStructField(I->getFieldIndex())); } return Success; } if (auto *AT = dyn_cast_or_null<ConstantArrayType>(T->getAsArrayTypeUnsafe())) { - Result = APValue(APValue::UninitArray(), 0, AT->getSize().getZExtValue()); + Result = APValue(APValue::UninitArray(), 0, AT->getZExtSize()); if (Result.hasArrayFiller()) Success &= - getDefaultInitValue(AT->getElementType(), Result.getArrayFiller()); + handleDefaultInitValue(AT->getElementType(), Result.getArrayFiller()); return Success; } @@ -4796,6 +4960,8 @@ enum EvalStmtResult { } static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) { + if (VD->isInvalidDecl()) + return false; // We don't need to evaluate the initializer for a static local. if (!VD->hasLocalStorage()) return true; @@ -4808,7 +4974,7 @@ static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) { if (!InitE) { if (VD->getType()->isDependentType()) return Info.noteSideEffect(); - return getDefaultInitValue(VD->getType(), Val); + return handleDefaultInitValue(VD->getType(), Val); } if (InitE->isValueDependent()) return false; @@ -4932,8 +5098,14 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info, if (SS->getConditionVariable() && !EvaluateDecl(Info, SS->getConditionVariable())) return ESR_Failed; + if (SS->getCond()->isValueDependent()) { + // We don't know what the value is, and which branch should jump to. + EvaluateDependentExpr(SS->getCond(), Info); + return ESR_Failed; + } if (!EvaluateInteger(SS->getCond(), Value, Info)) return ESR_Failed; + if (!CondScope.destroy()) return ESR_Failed; } @@ -4984,6 +5156,20 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info, llvm_unreachable("Invalid EvalStmtResult!"); } +static bool CheckLocalVariableDeclaration(EvalInfo &Info, const VarDecl *VD) { + // An expression E is a core constant expression unless the evaluation of E + // would evaluate one of the following: [C++23] - a control flow that passes + // through a declaration of a variable with static or thread storage duration + // unless that variable is usable in constant expressions. + if (VD->isLocalVarDecl() && VD->isStaticLocal() && + !VD->isUsableInConstantExpressions(Info.Ctx)) { + Info.CCEDiag(VD->getLocation(), diag::note_constexpr_static_local) + << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD; + return false; + } + return true; +} + // Evaluate a statement. 
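CheckLocalVariableDeclaration above implements the C++23 rule quoted in its comment: constant evaluation may pass through the declaration of a static (or thread_local) local only if that variable is itself usable in constant expressions. A C++23 sketch with illustrative names:

constexpr int table_lookup(int i) {
  static constexpr int Table[] = {2, 3, 5, 7};  // usable in constant expressions
  return Table[i];
}
static_assert(table_lookup(2) == 5);            // OK

constexpr int counted() {
  static int Calls = 0;                         // not usable in constant expressions
  return ++Calls;
}
int runtime_value = counted();                  // fine at run time
// static_assert(counted() == 1);               // error: control flow passes through
//                                              // the declaration of 'Calls'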
static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, const Stmt *S, const SwitchCase *Case) { @@ -5094,6 +5280,8 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, const DeclStmt *DS = cast<DeclStmt>(S); for (const auto *D : DS->decls()) { if (const auto *VD = dyn_cast<VarDecl>(D)) { + if (!CheckLocalVariableDeclaration(Info, VD)) + return ESR_Failed; if (VD->hasLocalStorage() && !VD->getInit()) if (!EvaluateVarDecl(Info, VD)) return ESR_Failed; @@ -5128,7 +5316,7 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, return ESR_Succeeded; } - Info.FFDiag(S->getBeginLoc()); + Info.FFDiag(S->getBeginLoc()) << S->getSourceRange(); return ESR_Failed; case Stmt::NullStmtClass: @@ -5137,6 +5325,9 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, case Stmt::DeclStmtClass: { const DeclStmt *DS = cast<DeclStmt>(S); for (const auto *D : DS->decls()) { + const VarDecl *VD = dyn_cast_or_null<VarDecl>(D); + if (VD && !CheckLocalVariableDeclaration(Info, VD)) + return ESR_Failed; // Each declaration initialization is its own full-expression. FullExpressionRAII Scope(Info); if (!EvaluateDecl(Info, D) && !Info.noteFailure()) @@ -5196,7 +5387,14 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, } } bool Cond; - if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), Cond)) + if (IS->isConsteval()) { + Cond = IS->isNonNegatedConsteval(); + // If we are not in a constant context, if consteval should not evaluate + // to true. + if (!Info.InConstantContext) + Cond = !Cond; + } else if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), + Cond)) return ESR_Failed; if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) { @@ -5321,6 +5519,11 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, return ESR; } + // In error-recovery cases it's possible to get here even if we failed to + // synthesize the __begin and __end variables. + if (!FS->getBeginStmt() || !FS->getEndStmt() || !FS->getCond()) + return ESR_Failed; + // Create the __begin and __end iterators. ESR = EvaluateStmt(Result, Info, FS->getBeginStmt()); if (ESR != ESR_Succeeded) { @@ -5395,11 +5598,40 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info, case Stmt::LabelStmtClass: return EvaluateStmt(Result, Info, cast<LabelStmt>(S)->getSubStmt(), Case); - case Stmt::AttributedStmtClass: - // As a general principle, C++11 attributes can be ignored without - // any semantic impact. 
- return EvaluateStmt(Result, Info, cast<AttributedStmt>(S)->getSubStmt(), - Case); + case Stmt::AttributedStmtClass: { + const auto *AS = cast<AttributedStmt>(S); + const auto *SS = AS->getSubStmt(); + MSConstexprContextRAII ConstexprContext( + *Info.CurrentCall, hasSpecificAttr<MSConstexprAttr>(AS->getAttrs()) && + isa<ReturnStmt>(SS)); + + auto LO = Info.getCtx().getLangOpts(); + if (LO.CXXAssumptions && !LO.MSVCCompat) { + for (auto *Attr : AS->getAttrs()) { + auto *AA = dyn_cast<CXXAssumeAttr>(Attr); + if (!AA) + continue; + + auto *Assumption = AA->getAssumption(); + if (Assumption->isValueDependent()) + return ESR_Failed; + + if (Assumption->HasSideEffects(Info.getCtx())) + continue; + + bool Value; + if (!EvaluateAsBooleanCondition(Assumption, Value, Info)) + return ESR_Failed; + if (!Value) { + Info.CCEDiag(Assumption->getExprLoc(), + diag::note_constexpr_assumption_failed); + return ESR_Failed; + } + } + } + + return EvaluateStmt(Result, Info, SS, Case); + } case Stmt::CaseStmtClass: case Stmt::DefaultStmtClass: @@ -5470,7 +5702,9 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc, } // Can we evaluate this function call? - if (Definition && Definition->isConstexpr() && Body) + if (Definition && Body && + (Definition->isConstexpr() || (Info.CurrentCall->CanEvalMSConstexpr && + Definition->hasAttr<MSConstexprAttr>()))) return true; if (Info.getLangOpts().CPlusPlus11) { @@ -5581,13 +5815,15 @@ static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator, } /// Determine the dynamic type of an object. -static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E, - LValue &This, AccessKinds AK) { +static std::optional<DynamicType> ComputeDynamicType(EvalInfo &Info, + const Expr *E, + LValue &This, + AccessKinds AK) { // If we don't have an lvalue denoting an object of class type, there is no // meaningful dynamic type. (We consider objects of non-class type to have no // dynamic type.) if (!checkDynamicType(Info, E, This, AK, true)) - return None; + return std::nullopt; // Refuse to compute a dynamic type in the presence of virtual bases. This // shouldn't happen other than in constant-folding situations, since literal @@ -5599,7 +5835,7 @@ static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E, This.Designator.MostDerivedType->getAsCXXRecordDecl(); if (!Class || Class->getNumVBases()) { Info.FFDiag(E); - return None; + return std::nullopt; } // FIXME: For very deep class hierarchies, it might be beneficial to use a @@ -5632,14 +5868,14 @@ static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E, // 'This', so that object has not yet begun its period of construction and // any polymorphic operation on it results in undefined behavior. Info.FFDiag(E); - return None; + return std::nullopt; } /// Perform virtual dispatch. static const CXXMethodDecl *HandleVirtualDispatch( EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found, llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) { - Optional<DynamicType> DynType = ComputeDynamicType( + std::optional<DynamicType> DynType = ComputeDynamicType( Info, E, This, isa<CXXDestructorDecl>(Found) ? AK_Destroy : AK_MemberCall); if (!DynType) @@ -5664,7 +5900,7 @@ static const CXXMethodDecl *HandleVirtualDispatch( // C++2a [class.abstract]p6: // the effect of making a virtual call to a pure virtual function [...] 
is // undefined - if (Callee->isPure()) { + if (Callee->isPureVirtual()) { Info.FFDiag(E, diag::note_constexpr_pure_virtual_call, 1) << Callee; Info.Note(Callee->getLocation(), diag::note_declared_at); return nullptr; @@ -5757,7 +5993,7 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E, // For all the other cases, we need the pointer to point to an object within // its lifetime / period of construction / destruction, and we need to know // its dynamic type. - Optional<DynamicType> DynType = + std::optional<DynamicType> DynType = ComputeDynamicType(Info, E, Ptr, AK_DynamicCast); if (!DynType) return false; @@ -5871,7 +6107,7 @@ struct StartLifetimeOfUnionMemberHandler { return false; } APValue Result; - Failed = !getDefaultInitValue(Field->getType(), Result); + Failed = !handleDefaultInitValue(Field->getType(), Result); Subobj.setUnion(Field, Result); return true; } @@ -5890,8 +6126,9 @@ const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind; /// operator whose left-hand side might involve a union member access. If it /// does, implicitly start the lifetime of any accessed union elements per /// C++20 [class.union]5. -static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr, - const LValue &LHS) { +static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info, + const Expr *LHSExpr, + const LValue &LHS) { if (LHS.InvalidBase || LHS.Designator.Invalid) return false; @@ -5946,8 +6183,14 @@ static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr, break; // Walk path backwards as we walk up from the base to the derived class. for (const CXXBaseSpecifier *Elt : llvm::reverse(ICE->path())) { + if (Elt->isVirtual()) { + // A class with virtual base classes never has a trivial default + // constructor, so S(E) is empty in this case. + E = nullptr; + break; + } + --PathLength; - (void)Elt; assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(), LHS.Designator.Entries[PathLength] .getAsBaseOrMember().getPointer())); @@ -6028,7 +6271,7 @@ static bool EvaluateArgs(ArrayRef<const Expr *> Args, CallRef Call, unsigned ASTIdx = Idx.getASTIndex(); if (ASTIdx >= Args.size()) continue; - ForbiddenNullArgs[ASTIdx] = 1; + ForbiddenNullArgs[ASTIdx] = true; } } } @@ -6072,13 +6315,13 @@ static bool handleTrivialCopy(EvalInfo &Info, const ParmVarDecl *Param, /// Evaluate a function call. static bool HandleFunctionCall(SourceLocation CallLoc, const FunctionDecl *Callee, const LValue *This, - ArrayRef<const Expr *> Args, CallRef Call, - const Stmt *Body, EvalInfo &Info, + const Expr *E, ArrayRef<const Expr *> Args, + CallRef Call, const Stmt *Body, EvalInfo &Info, APValue &Result, const LValue *ResultSlot) { if (!Info.CheckCallLimit(CallLoc)) return false; - CallStackFrame Frame(Info, CallLoc, Callee, This, Call); + CallStackFrame Frame(Info, E->getSourceRange(), Callee, This, E, Call); // For a trivial copy or move assignment, perform an APValue copy. 
This is // essential for unions, where the operations performed by the assignment @@ -6097,9 +6340,6 @@ static bool HandleFunctionCall(SourceLocation CallLoc, if (!handleTrivialCopy(Info, MD->getParamDecl(0), Args[0], RHSValue, MD->getParent()->isUnion())) return false; - if (Info.getLangOpts().CPlusPlus20 && MD->isTrivial() && - !HandleUnionActiveMemberChange(Info, Args[0], *This)) - return false; if (!handleAssignment(Info, Args[0], *This, MD->getThisType(), RHSValue)) return false; @@ -6146,7 +6386,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, Info, ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries}, RD->getNumBases()); - CallStackFrame Frame(Info, CallLoc, Definition, &This, Call); + CallStackFrame Frame(Info, E->getSourceRange(), Definition, &This, E, Call); // FIXME: Creating an APValue just to hold a nonexistent return value is // wasteful. @@ -6218,8 +6458,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, // Default-initialize any fields with no explicit initializer. for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) { assert(FieldIt != RD->field_end() && "missing field?"); - if (!FieldIt->isUnnamedBitfield()) - Success &= getDefaultInitValue( + if (!FieldIt->isUnnamedBitField()) + Success &= handleDefaultInitValue( FieldIt->getType(), Result.getStructField(FieldIt->getFieldIndex())); } @@ -6238,7 +6478,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, // Non-virtual base classes are initialized in the order in the class // definition. We have already checked for virtual base classes. assert(!BaseIt->isVirtual() && "virtual base for literal type"); - assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) && + assert(Info.Ctx.hasSameUnqualifiedType(BaseIt->getType(), BaseType) && "base class initializers not in expected order"); ++BaseIt; #endif @@ -6276,7 +6516,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, // FIXME: This immediately starts the lifetime of all members of // an anonymous struct. It would be preferable to strictly start // member lifetime in initialization order. - Success &= getDefaultInitValue(Info.Ctx.getRecordType(CD), *Value); + Success &= + handleDefaultInitValue(Info.Ctx.getRecordType(CD), *Value); } // Store Subobject as its parent before updating it for the last element // in the chain. @@ -6327,8 +6568,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, // Default-initialize any remaining fields. if (!RD->isUnion()) { for (; FieldIt != RD->field_end(); ++FieldIt) { - if (!FieldIt->isUnnamedBitfield()) - Success &= getDefaultInitValue( + if (!FieldIt->isUnnamedBitField()) + Success &= handleDefaultInitValue( FieldIt->getType(), Result.getStructField(FieldIt->getFieldIndex())); } @@ -6354,7 +6595,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This, CallScope.destroy(); } -static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, +static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange, const LValue &This, APValue &Value, QualType T) { // Objects can only be destroyed while they're within their lifetimes. 
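(Illustrative aside, not part of this diff.) MaybeHandleUnionActiveMemberChange models C++20 [class.union]p5: when an assignment's left-hand side names a union member, constant evaluation implicitly starts that member's lifetime. A minimal user-level sketch of what this permits (C++20):

    union U {
      int i;
      float f;
    };
    constexpr int switch_member() {
      U u = {.i = 1};
      u.f = 2.0f;                    // assignment starts the lifetime of 'f'
      return static_cast<int>(u.f);
    }
    static_assert(switch_member() == 2);
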
@@ -6364,20 +6605,24 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, if (Value.isAbsent() && !T->isNullPtrType()) { APValue Printable; This.moveInto(Printable); - Info.FFDiag(CallLoc, diag::note_constexpr_destroy_out_of_lifetime) - << Printable.getAsString(Info.Ctx, Info.Ctx.getLValueReferenceType(T)); + Info.FFDiag(CallRange.getBegin(), + diag::note_constexpr_destroy_out_of_lifetime) + << Printable.getAsString(Info.Ctx, Info.Ctx.getLValueReferenceType(T)); return false; } // Invent an expression for location purposes. // FIXME: We shouldn't need to do this. - OpaqueValueExpr LocE(CallLoc, Info.Ctx.IntTy, VK_PRValue); + OpaqueValueExpr LocE(CallRange.getBegin(), Info.Ctx.IntTy, VK_PRValue); // For arrays, destroy elements right-to-left. if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) { - uint64_t Size = CAT->getSize().getZExtValue(); + uint64_t Size = CAT->getZExtSize(); QualType ElemT = CAT->getElementType(); + if (!CheckArraySize(Info, CAT, CallRange.getBegin())) + return false; + LValue ElemLV = This; ElemLV.addArray(Info, &LocE, CAT); if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, Size)) @@ -6392,7 +6637,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, for (; Size != 0; --Size) { APValue &Elem = Value.getArrayInitializedElt(Size - 1); if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, -1) || - !HandleDestructionImpl(Info, CallLoc, ElemLV, Elem, ElemT)) + !HandleDestructionImpl(Info, CallRange, ElemLV, Elem, ElemT)) return false; } @@ -6404,7 +6649,9 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, const CXXRecordDecl *RD = T->getAsCXXRecordDecl(); if (!RD) { if (T.isDestructedType()) { - Info.FFDiag(CallLoc, diag::note_constexpr_unsupported_destruction) << T; + Info.FFDiag(CallRange.getBegin(), + diag::note_constexpr_unsupported_destruction) + << T; return false; } @@ -6413,13 +6660,13 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, } if (RD->getNumVBases()) { - Info.FFDiag(CallLoc, diag::note_constexpr_virtual_base) << RD; + Info.FFDiag(CallRange.getBegin(), diag::note_constexpr_virtual_base) << RD; return false; } const CXXDestructorDecl *DD = RD->getDestructor(); if (!DD && !RD->hasTrivialDestructor()) { - Info.FFDiag(CallLoc); + Info.FFDiag(CallRange.getBegin()); return false; } @@ -6438,16 +6685,17 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, return true; } - if (!Info.CheckCallLimit(CallLoc)) + if (!Info.CheckCallLimit(CallRange.getBegin())) return false; const FunctionDecl *Definition = nullptr; const Stmt *Body = DD->getBody(Definition); - if (!CheckConstexprFunction(Info, CallLoc, DD, Definition, Body)) + if (!CheckConstexprFunction(Info, CallRange.getBegin(), DD, Definition, Body)) return false; - CallStackFrame Frame(Info, CallLoc, Definition, &This, CallRef()); + CallStackFrame Frame(Info, CallRange, Definition, &This, /*CallExpr=*/nullptr, + CallRef()); // We're now in the period of destruction of this object. unsigned BasesLeft = RD->getNumBases(); @@ -6461,7 +6709,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, // (Note that formally the lifetime ends when the period of destruction // begins, even though certain uses of the object remain valid until the // period of destruction ends.) 
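(Illustrative aside, not part of this diff.) HandleDestructionImpl runs constexpr destructors during constant evaluation, destroying array elements right-to-left and, further down, fields and bases in reverse construction order. A small sketch (C++20):

    struct Tracked {
      int *count;
      constexpr ~Tracked() { ++*count; }
    };
    constexpr int destroyed() {
      int n = 0;
      {
        Tracked a[3] = {{&n}, {&n}, {&n}};
      }                               // elements destroyed right-to-left here
      return n;
    }
    static_assert(destroyed() == 3);
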
- Info.FFDiag(CallLoc, diag::note_constexpr_double_destroy); + Info.FFDiag(CallRange.getBegin(), diag::note_constexpr_double_destroy); return false; } @@ -6480,9 +6728,9 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, // We don't have a good way to iterate fields in reverse, so collect all the // fields first and then walk them backwards. - SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end()); + SmallVector<FieldDecl*, 16> Fields(RD->fields()); for (const FieldDecl *FD : llvm::reverse(Fields)) { - if (FD->isUnnamedBitfield()) + if (FD->isUnnamedBitField()) continue; LValue Subobject = This; @@ -6490,7 +6738,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, return false; APValue *SubobjectValue = &Value.getStructField(FD->getFieldIndex()); - if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue, + if (!HandleDestructionImpl(Info, CallRange, Subobject, *SubobjectValue, FD->getType())) return false; } @@ -6509,7 +6757,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc, return false; APValue *SubobjectValue = &Value.getStructBase(BasesLeft); - if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue, + if (!HandleDestructionImpl(Info, CallRange, Subobject, *SubobjectValue, BaseType)) return false; } @@ -6530,7 +6778,7 @@ struct DestroyObjectHandler { typedef bool result_type; bool failed() { return false; } bool found(APValue &Subobj, QualType SubobjType) { - return HandleDestructionImpl(Info, E->getExprLoc(), This, Subobj, + return HandleDestructionImpl(Info, E->getSourceRange(), This, Subobj, SubobjType); } bool found(APSInt &Value, QualType SubobjType) { @@ -6567,7 +6815,7 @@ static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc, return HandleDestructionImpl(Info, Loc, LV, Value, T); } -/// Perform a call to 'perator new' or to `__builtin_operator_new'. +/// Perform a call to 'operator new' or to `__builtin_operator_new'. static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E, LValue &Result) { if (Info.checkingPotentialConstantExpression() || @@ -6613,18 +6861,17 @@ static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E, return false; } - if (ByteSize.getActiveBits() > ConstantArrayType::getMaxSizeBits(Info.Ctx)) { + if (!Info.CheckArraySize(E->getBeginLoc(), ByteSize.getActiveBits(), + Size.getZExtValue(), /*Diag=*/!IsNothrow)) { if (IsNothrow) { Result.setNull(Info.Ctx, E->getType()); return true; } - - Info.FFDiag(E, diag::note_constexpr_new_too_large) << APSInt(Size, true); return false; } - QualType AllocType = Info.Ctx.getConstantArrayType(ElemType, Size, nullptr, - ArrayType::Normal, 0); + QualType AllocType = Info.Ctx.getConstantArrayType( + ElemType, Size, nullptr, ArraySizeModifier::Normal, 0); APValue *Val = Info.createHeapAlloc(E, AllocType, Result); *Val = APValue(APValue::UninitArray(), 0, Size.getZExtValue()); Result.addArray(Info, E, cast<ConstantArrayType>(AllocType)); @@ -6649,10 +6896,10 @@ static const FunctionDecl *getVirtualOperatorDelete(QualType T) { /// still exists and is of the right kind for the purpose of a deletion. /// /// On success, returns the heap allocation to deallocate. On failure, produces -/// a diagnostic and returns None. -static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E, - const LValue &Pointer, - DynAlloc::Kind DeallocKind) { +/// a diagnostic and returns std::nullopt. 
+static std::optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E, + const LValue &Pointer, + DynAlloc::Kind DeallocKind) { auto PointerAsString = [&] { return Pointer.toString(Info.Ctx, Info.Ctx.VoidPtrTy); }; @@ -6663,21 +6910,21 @@ static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E, << PointerAsString(); if (Pointer.Base) NoteLValueLocation(Info, Pointer.Base); - return None; + return std::nullopt; } - Optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA); + std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA); if (!Alloc) { Info.FFDiag(E, diag::note_constexpr_double_delete); - return None; + return std::nullopt; } - QualType AllocType = Pointer.Base.getDynamicAllocType(); if (DeallocKind != (*Alloc)->getKind()) { + QualType AllocType = Pointer.Base.getDynamicAllocType(); Info.FFDiag(E, diag::note_constexpr_new_delete_mismatch) << DeallocKind << (*Alloc)->getKind() << AllocType; NoteLValueLocation(Info, Pointer.Base); - return None; + return std::nullopt; } bool Subobject = false; @@ -6691,7 +6938,7 @@ static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E, if (Subobject) { Info.FFDiag(E, diag::note_constexpr_delete_subobject) << PointerAsString() << Pointer.Designator.isOnePastTheEnd(); - return None; + return std::nullopt; } return Alloc; @@ -6743,7 +6990,7 @@ class BitCastBuffer { // FIXME: Its possible under the C++ standard for 'char' to not be 8 bits, but // we don't support a host or target where that is the case. Still, we should // use a more generic type in case we ever do. - SmallVector<Optional<unsigned char>, 32> Bytes; + SmallVector<std::optional<unsigned char>, 32> Bytes; static_assert(std::numeric_limits<unsigned char>::digits >= 8, "Need at least 8 bit unsigned char"); @@ -6755,12 +7002,11 @@ public: : Bytes(Width.getQuantity()), TargetIsLittleEndian(TargetIsLittleEndian) {} - LLVM_NODISCARD - bool readObject(CharUnits Offset, CharUnits Width, - SmallVectorImpl<unsigned char> &Output) const { + [[nodiscard]] bool readObject(CharUnits Offset, CharUnits Width, + SmallVectorImpl<unsigned char> &Output) const { for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) { // If a byte of an integer is uninitialized, then the whole integer is - // uninitalized. + // uninitialized. if (!Bytes[I.getQuantity()]) return false; Output.push_back(*Bytes[I.getQuantity()]); @@ -6824,10 +7070,11 @@ class APValueToBufferConverter { return visitArray(Val, Ty, Offset); case APValue::Struct: return visitRecord(Val, Ty, Offset); + case APValue::Vector: + return visitVector(Val, Ty, Offset); case APValue::ComplexInt: case APValue::ComplexFloat: - case APValue::Vector: case APValue::FixedPoint: // FIXME: We should support these. @@ -6914,6 +7161,72 @@ class APValueToBufferConverter { return true; } + bool visitVector(const APValue &Val, QualType Ty, CharUnits Offset) { + const VectorType *VTy = Ty->castAs<VectorType>(); + QualType EltTy = VTy->getElementType(); + unsigned NElts = VTy->getNumElements(); + unsigned EltSize = + VTy->isExtVectorBoolType() ? 1 : Info.Ctx.getTypeSize(EltTy); + + if ((NElts * EltSize) % Info.Ctx.getCharWidth() != 0) { + // The vector's size in bits is not a multiple of the target's byte size, + // so its layout is unspecified. For now, we'll simply treat these cases + // as unsupported (this should only be possible with OpenCL bool vectors + // whose element count isn't a multiple of the byte size). 
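(Illustrative aside, not part of this diff.) CheckDeleteKind verifies that a constant-evaluated deallocation matches its allocation: the pointer must refer to a live constexpr allocation of the right kind (new vs. new[] vs. std::allocator), must not already have been deleted, and must not point at a subobject. A minimal user-level sketch (C++20):

    constexpr int boxed() {
      int *p = new int(42);
      int v = *p;
      delete p;   // matching delete; 'delete[] p' would hit note_constexpr_new_delete_mismatch
      return v;
    }
    static_assert(boxed() == 42);
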
+ Info.FFDiag(BCE->getBeginLoc(), + diag::note_constexpr_bit_cast_invalid_vector) + << Ty.getCanonicalType() << EltSize << NElts + << Info.Ctx.getCharWidth(); + return false; + } + + if (EltTy->isRealFloatingType() && &Info.Ctx.getFloatTypeSemantics(EltTy) == + &APFloat::x87DoubleExtended()) { + // The layout for x86_fp80 vectors seems to be handled very inconsistently + // by both clang and LLVM, so for now we won't allow bit_casts involving + // it in a constexpr context. + Info.FFDiag(BCE->getBeginLoc(), + diag::note_constexpr_bit_cast_unsupported_type) + << EltTy; + return false; + } + + if (VTy->isExtVectorBoolType()) { + // Special handling for OpenCL bool vectors: + // Since these vectors are stored as packed bits, but we can't write + // individual bits to the BitCastBuffer, we'll buffer all of the elements + // together into an appropriately sized APInt and write them all out at + // once. Because we don't accept vectors where NElts * EltSize isn't a + // multiple of the char size, there will be no padding space, so we don't + // have to worry about writing data which should have been left + // uninitialized. + bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); + + llvm::APInt Res = llvm::APInt::getZero(NElts); + for (unsigned I = 0; I < NElts; ++I) { + const llvm::APSInt &EltAsInt = Val.getVectorElt(I).getInt(); + assert(EltAsInt.isUnsigned() && EltAsInt.getBitWidth() == 1 && + "bool vector element must be 1-bit unsigned integer!"); + + Res.insertBits(EltAsInt, BigEndian ? (NElts - I - 1) : I); + } + + SmallVector<uint8_t, 8> Bytes(NElts / 8); + llvm::StoreIntToMemory(Res, &*Bytes.begin(), NElts / 8); + Buffer.writeObject(Offset, Bytes); + } else { + // Iterate over each of the elements and write them out to the buffer at + // the appropriate offset. + CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(EltTy); + for (unsigned I = 0; I < NElts; ++I) { + if (!visit(Val.getVectorElt(I), EltTy, Offset + I * EltSizeChars)) + return false; + } + } + + return true; + } + bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) { APSInt AdjustedVal = Val; unsigned Width = AdjustedVal.getBitWidth(); @@ -6922,7 +7235,7 @@ class APValueToBufferConverter { AdjustedVal = AdjustedVal.extend(Width); } - SmallVector<unsigned char, 8> Bytes(Width / 8); + SmallVector<uint8_t, 8> Bytes(Width / 8); llvm::StoreIntToMemory(AdjustedVal, &*Bytes.begin(), Width / 8); Buffer.writeObject(Offset, Bytes); return true; @@ -6934,12 +7247,12 @@ class APValueToBufferConverter { } public: - static Optional<BitCastBuffer> convert(EvalInfo &Info, const APValue &Src, - const CastExpr *BCE) { + static std::optional<BitCastBuffer> + convert(EvalInfo &Info, const APValue &Src, const CastExpr *BCE) { CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType()); APValueToBufferConverter Converter(Info, DstSize, BCE); if (!Converter.visit(Src, BCE->getSubExpr()->getType())) - return None; + return std::nullopt; return Converter.Buffer; } }; @@ -6957,22 +7270,22 @@ class BufferToAPValueConverter { // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast // with an invalid type, so anything left is a deficiency on our part (FIXME). // Ideally this will be unreachable. 
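(Illustrative aside, not part of this diff.) visitVector serializes vector values into the bit-cast buffer, packing bool vectors bit-by-bit and writing other element types at their natural offsets, so that __builtin_bit_cast from a vector type can be constant-evaluated. A rough sketch, assuming a 16-byte generic vector and a little-endian target:

    typedef int v4i __attribute__((vector_size(16)));
    struct Raw { unsigned char b[16]; };

    constexpr v4i v = {1, 2, 3, 4};
    constexpr Raw r = __builtin_bit_cast(Raw, v);
    static_assert(r.b[0] == 1);   // low byte of element 0 on a little-endian target
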
- llvm::NoneType unsupportedType(QualType Ty) { + std::nullopt_t unsupportedType(QualType Ty) { Info.FFDiag(BCE->getBeginLoc(), diag::note_constexpr_bit_cast_unsupported_type) << Ty; - return None; + return std::nullopt; } - llvm::NoneType unrepresentableValue(QualType Ty, const APSInt &Val) { + std::nullopt_t unrepresentableValue(QualType Ty, const APSInt &Val) { Info.FFDiag(BCE->getBeginLoc(), diag::note_constexpr_bit_cast_unrepresentable_value) << Ty << toString(Val, /*Radix=*/10); - return None; + return std::nullopt; } - Optional<APValue> visit(const BuiltinType *T, CharUnits Offset, - const EnumType *EnumSugar = nullptr) { + std::optional<APValue> visit(const BuiltinType *T, CharUnits Offset, + const EnumType *EnumSugar = nullptr) { if (T->isNullPtrType()) { uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QualType(T, 0)); return APValue((Expr *)nullptr, @@ -7008,7 +7321,7 @@ class BufferToAPValueConverter { Info.FFDiag(BCE->getExprLoc(), diag::note_constexpr_bit_cast_indet_dest) << DisplayType << Info.Ctx.getLangOpts().CharIsSigned; - return None; + return std::nullopt; } return APValue::IndeterminateValue(); @@ -7040,7 +7353,7 @@ class BufferToAPValueConverter { return unsupportedType(QualType(T, 0)); } - Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) { + std::optional<APValue> visit(const RecordType *RTy, CharUnits Offset) { const RecordDecl *RD = RTy->getAsRecordDecl(); const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); @@ -7056,14 +7369,11 @@ class BufferToAPValueConverter { for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) { const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I]; CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl(); - if (BaseDecl->isEmpty() || - Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero()) - continue; - Optional<APValue> SubObj = visitType( + std::optional<APValue> SubObj = visitType( BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset); if (!SubObj) - return None; + return std::nullopt; ResultVal.getStructBase(I) = *SubObj; } } @@ -7076,7 +7386,7 @@ class BufferToAPValueConverter { if (FD->isBitField()) { Info.FFDiag(BCE->getBeginLoc(), diag::note_constexpr_bit_cast_unsupported_bitfield); - return None; + return std::nullopt; } uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx); @@ -7086,9 +7396,9 @@ class BufferToAPValueConverter { CharUnits::fromQuantity(FieldOffsetBits / Info.Ctx.getCharWidth()) + Offset; QualType FieldTy = FD->getType(); - Optional<APValue> SubObj = visitType(FieldTy, FieldOffset); + std::optional<APValue> SubObj = visitType(FieldTy, FieldOffset); if (!SubObj) - return None; + return std::nullopt; ResultVal.getStructField(FieldIdx) = *SubObj; ++FieldIdx; } @@ -7096,7 +7406,7 @@ class BufferToAPValueConverter { return ResultVal; } - Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) { + std::optional<APValue> visit(const EnumType *Ty, CharUnits Offset) { QualType RepresentationType = Ty->getDecl()->getIntegerType(); assert(!RepresentationType.isNull() && "enum forward decl should be caught by Sema"); @@ -7107,27 +7417,98 @@ class BufferToAPValueConverter { return visit(AsBuiltin, Offset, /*EnumTy=*/Ty); } - Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) { - size_t Size = Ty->getSize().getLimitedValue(); + std::optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) { + size_t Size = Ty->getLimitedSize(); CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType()); APValue 
ArrayValue(APValue::UninitArray(), Size, Size); for (size_t I = 0; I != Size; ++I) { - Optional<APValue> ElementValue = + std::optional<APValue> ElementValue = visitType(Ty->getElementType(), Offset + I * ElementWidth); if (!ElementValue) - return None; + return std::nullopt; ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue); } return ArrayValue; } - Optional<APValue> visit(const Type *Ty, CharUnits Offset) { + std::optional<APValue> visit(const VectorType *VTy, CharUnits Offset) { + QualType EltTy = VTy->getElementType(); + unsigned NElts = VTy->getNumElements(); + unsigned EltSize = + VTy->isExtVectorBoolType() ? 1 : Info.Ctx.getTypeSize(EltTy); + + if ((NElts * EltSize) % Info.Ctx.getCharWidth() != 0) { + // The vector's size in bits is not a multiple of the target's byte size, + // so its layout is unspecified. For now, we'll simply treat these cases + // as unsupported (this should only be possible with OpenCL bool vectors + // whose element count isn't a multiple of the byte size). + Info.FFDiag(BCE->getBeginLoc(), + diag::note_constexpr_bit_cast_invalid_vector) + << QualType(VTy, 0) << EltSize << NElts << Info.Ctx.getCharWidth(); + return std::nullopt; + } + + if (EltTy->isRealFloatingType() && &Info.Ctx.getFloatTypeSemantics(EltTy) == + &APFloat::x87DoubleExtended()) { + // The layout for x86_fp80 vectors seems to be handled very inconsistently + // by both clang and LLVM, so for now we won't allow bit_casts involving + // it in a constexpr context. + Info.FFDiag(BCE->getBeginLoc(), + diag::note_constexpr_bit_cast_unsupported_type) + << EltTy; + return std::nullopt; + } + + SmallVector<APValue, 4> Elts; + Elts.reserve(NElts); + if (VTy->isExtVectorBoolType()) { + // Special handling for OpenCL bool vectors: + // Since these vectors are stored as packed bits, but we can't read + // individual bits from the BitCastBuffer, we'll buffer all of the + // elements together into an appropriately sized APInt and write them all + // out at once. Because we don't accept vectors where NElts * EltSize + // isn't a multiple of the char size, there will be no padding space, so + // we don't have to worry about reading any padding data which didn't + // actually need to be accessed. + bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); + + SmallVector<uint8_t, 8> Bytes; + Bytes.reserve(NElts / 8); + if (!Buffer.readObject(Offset, CharUnits::fromQuantity(NElts / 8), Bytes)) + return std::nullopt; + + APSInt SValInt(NElts, true); + llvm::LoadIntFromMemory(SValInt, &*Bytes.begin(), Bytes.size()); + + for (unsigned I = 0; I < NElts; ++I) { + llvm::APInt Elt = + SValInt.extractBits(1, (BigEndian ? NElts - I - 1 : I) * EltSize); + Elts.emplace_back( + APSInt(std::move(Elt), !EltTy->isSignedIntegerType())); + } + } else { + // Iterate over each of the elements and read them from the buffer at + // the appropriate offset. 
+ CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(EltTy); + for (unsigned I = 0; I < NElts; ++I) { + std::optional<APValue> EltValue = + visitType(EltTy, Offset + I * EltSizeChars); + if (!EltValue) + return std::nullopt; + Elts.push_back(std::move(*EltValue)); + } + } + + return APValue(Elts.data(), Elts.size()); + } + + std::optional<APValue> visit(const Type *Ty, CharUnits Offset) { return unsupportedType(QualType(Ty, 0)); } - Optional<APValue> visitType(QualType Ty, CharUnits Offset) { + std::optional<APValue> visitType(QualType Ty, CharUnits Offset) { QualType Can = Ty.getCanonicalType(); switch (Can->getTypeClass()) { @@ -7152,8 +7533,8 @@ class BufferToAPValueConverter { public: // Pull out a full value of type DstType. - static Optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer, - const CastExpr *BCE) { + static std::optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer, + const CastExpr *BCE) { BufferToAPValueConverter Converter(Info, Buffer, BCE); return Converter.visitType(BCE->getType(), CharUnits::fromQuantity(0)); } @@ -7222,33 +7603,23 @@ static bool checkBitCastConstexprEligibility(EvalInfo *Info, return SourceOK; } -static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue, - APValue &SourceValue, +static bool handleRValueToRValueBitCast(EvalInfo &Info, APValue &DestValue, + const APValue &SourceRValue, const CastExpr *BCE) { assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 && "no host or target supports non 8-bit chars"); - assert(SourceValue.isLValue() && - "LValueToRValueBitcast requires an lvalue operand!"); if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE)) return false; - LValue SourceLValue; - APValue SourceRValue; - SourceLValue.setFrom(Info.Ctx, SourceValue); - if (!handleLValueToRValueConversion( - Info, BCE, BCE->getSubExpr()->getType().withConst(), SourceLValue, - SourceRValue, /*WantObjectRepresentation=*/true)) - return false; - // Read out SourceValue into a char buffer. - Optional<BitCastBuffer> Buffer = + std::optional<BitCastBuffer> Buffer = APValueToBufferConverter::convert(Info, SourceRValue, BCE); if (!Buffer) return false; // Write out the buffer into a new APValue. 
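(Illustrative aside, not part of this diff.) Together, APValueToBufferConverter and BufferToAPValueConverter implement constant evaluation of LValueToRValueBitCast, i.e. __builtin_bit_cast, which std::bit_cast is typically built on. For example, assuming a 32-bit unsigned int and IEEE-754 float:

    constexpr unsigned bits = __builtin_bit_cast(unsigned, 1.0f);
    static_assert(bits == 0x3f800000u);   // IEEE-754 encoding of 1.0f
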
- Optional<APValue> MaybeDestValue = + std::optional<APValue> MaybeDestValue = BufferToAPValueConverter::convert(Info, *Buffer, BCE); if (!MaybeDestValue) return false; @@ -7257,6 +7628,25 @@ static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue, return true; } +static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue, + APValue &SourceValue, + const CastExpr *BCE) { + assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 && + "no host or target supports non 8-bit chars"); + assert(SourceValue.isLValue() && + "LValueToRValueBitcast requires an lvalue operand!"); + + LValue SourceLValue; + APValue SourceRValue; + SourceLValue.setFrom(Info.Ctx, SourceValue); + if (!handleLValueToRValueConversion( + Info, BCE, BCE->getSubExpr()->getType().withConst(), SourceLValue, + SourceRValue, /*WantObjectRepresentation=*/true)) + return false; + + return handleRValueToRValueBitCast(Info, DestValue, SourceRValue, BCE); +} + template <class Derived> class ExprEvaluatorBase : public ConstStmtVisitor<Derived, bool> { @@ -7327,6 +7717,12 @@ protected: bool ZeroInitialization(const Expr *E) { return Error(E); } + bool IsConstantEvaluatedBuiltinCall(const CallExpr *E) { + unsigned BuiltinOp = E->getBuiltinCallee(); + return BuiltinOp != 0 && + Info.Ctx.BuiltinInfo.isConstantEvaluated(BuiltinOp); + } + public: ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {} @@ -7335,7 +7731,7 @@ public: /// Report an evaluation error. This should only be called when an error is /// first discovered. When propagating an error, just return false. bool Error(const Expr *E, diag::kind D) { - Info.FFDiag(E, D); + Info.FFDiag(E, D) << E->getSourceRange(); return false; } bool Error(const Expr *E) { @@ -7349,6 +7745,14 @@ public: return Error(E); } + bool VisitEmbedExpr(const EmbedExpr *E) { + const auto It = E->begin(); + return StmtVisitorTy::Visit(*It); + } + + bool VisitPredefinedExpr(const PredefinedExpr *E) { + return StmtVisitorTy::Visit(E->getFunctionName()); + } bool VisitConstantExpr(const ConstantExpr *E) { if (E->hasAPValueResult()) return DerivedSuccess(E->getAPValueResult(), E); @@ -7474,13 +7878,14 @@ public: } bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) { - if (APValue *Value = Info.CurrentCall->getCurrentTemporary(E)) + if (APValue *Value = Info.CurrentCall->getCurrentTemporary(E); + Value && !Value->isAbsent()) return DerivedSuccess(*Value, E); const Expr *Source = E->getSourceExpr(); if (!Source) return Error(E); - if (Source == E) { // sanity checking. + if (Source == E) { assert(0 && "OpaqueValueExpr recursively refers to itself"); return Error(E); } @@ -7534,7 +7939,7 @@ public: const FunctionDecl *FD = nullptr; LValue *This = nullptr, ThisVal; - auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs()); + auto Args = llvm::ArrayRef(E->getArgs(), E->getNumArgs()); bool HasQualifier = false; CallRef Call; @@ -7576,6 +7981,11 @@ public: if (!CalleeLV.getLValueOffset().isZero()) return Error(Callee); + if (CalleeLV.isNullPointer()) { + Info.FFDiag(Callee, diag::note_constexpr_null_callee) + << const_cast<Expr *>(Callee); + return false; + } FD = dyn_cast_or_null<FunctionDecl>( CalleeLV.getLValueBase().dyn_cast<const ValueDecl *>()); if (!FD) @@ -7593,15 +8003,19 @@ public: if (OCE && OCE->isAssignmentOp()) { assert(Args.size() == 2 && "wrong number of arguments in assignment"); Call = Info.CurrentCall->createCall(FD); - if (!EvaluateArgs(isa<CXXMethodDecl>(FD) ? 
Args.slice(1) : Args, Call, - Info, FD, /*RightToLeft=*/true)) + bool HasThis = false; + if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) + HasThis = MD->isImplicitObjectMemberFunction(); + if (!EvaluateArgs(HasThis ? Args.slice(1) : Args, Call, Info, FD, + /*RightToLeft=*/true)) return false; } // Overloaded operator calls to member functions are represented as normal // calls with '*this' as the first argument. const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); - if (MD && !MD->isStatic()) { + if (MD && + (MD->isImplicitObjectMemberFunction() || (OCE && MD->isStatic()))) { // FIXME: When selecting an implicit conversion for an overloaded // operator delete, we sometimes try to evaluate calls to conversion // operators without a 'this' parameter! @@ -7610,7 +8024,20 @@ public: if (!EvaluateObjectArgument(Info, Args[0], ThisVal)) return false; - This = &ThisVal; + + // If we are calling a static operator, the 'this' argument needs to be + // ignored after being evaluated. + if (MD->isInstance()) + This = &ThisVal; + + // If this is syntactically a simple assignment using a trivial + // assignment operator, start the lifetimes of union members as needed, + // per C++20 [class.union]5. + if (Info.getLangOpts().CPlusPlus20 && OCE && + OCE->getOperator() == OO_Equal && MD->isTrivial() && + !MaybeHandleUnionActiveMemberChange(Info, Args[0], ThisVal)) + return false; + Args = Args.slice(1); } else if (MD && MD->isLambdaStaticInvoker()) { // Map the static invoker for the lambda back to the call operator. @@ -7642,7 +8069,8 @@ public: assert(CorrespondingCallOpSpecialization && "We must always have a function call operator specialization " "that corresponds to our static invoker specialization"); - FD = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); + assert(isa<CXXMethodDecl>(CorrespondingCallOpSpecialization)); + FD = CorrespondingCallOpSpecialization; } else FD = LambdaCallOp; } else if (FD->isReplaceableGlobalAllocationFunction()) { @@ -7676,7 +8104,7 @@ public: CovariantAdjustmentPath); if (!FD) return false; - } else { + } else if (NamedMember && NamedMember->isImplicitObjectMemberFunction()) { // Check that the 'this' pointer points to an object of the right type. // FIXME: If this is an assignment operator call, we may need to change // the active union member before we check this. @@ -7697,7 +8125,7 @@ public: Stmt *Body = FD->getBody(Definition); if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body) || - !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Call, + !HandleFunctionCall(E->getExprLoc(), Definition, This, E, Args, Call, Body, Info, Result, ResultSlot)) return false; @@ -7857,8 +8285,8 @@ public: bool VisitStmtExpr(const StmtExpr *E) { // We will have checked the full-expressions inside the statement expression // when they were completed, and don't need to check them again now. - llvm::SaveAndRestore<bool> NotCheckingForUB( - Info.CheckingForUndefinedBehavior, false); + llvm::SaveAndRestore NotCheckingForUB(Info.CheckingForUndefinedBehavior, + false); const CompoundStmt *CS = E->getSubStmt(); if (CS->body_empty()) @@ -7895,6 +8323,10 @@ public: llvm_unreachable("Return from function from the loop above."); } + bool VisitPackIndexingExpr(const PackIndexingExpr *E) { + return StmtVisitorTy::Visit(E->getSelectedExpr()); + } + /// Visit a value which is evaluated, but whose value is ignored. 
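(Illustrative aside, not part of this diff.) The static-invoker mapping above lets a call made through a captureless lambda's function-pointer conversion be constant-evaluated: the synthesized invoker is mapped back to the lambda's call operator (or its specialization, for generic lambdas) before the body is evaluated. Sketch (C++17 or later):

    constexpr int (*fp)(int) = [](int x) { return x + 1; };
    static_assert(fp(41) == 42);
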
void VisitIgnoredValue(const Expr *E) { EvaluateIgnoredValue(Info, E); @@ -8062,6 +8494,7 @@ public: bool VisitVarDecl(const Expr *E, const VarDecl *VD); bool VisitUnaryPreIncDec(const UnaryOperator *UO); + bool VisitCallExpr(const CallExpr *E); bool VisitDeclRefExpr(const DeclRefExpr *E); bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); } bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); @@ -8090,7 +8523,8 @@ public: return LValueExprEvaluatorBaseTy::VisitCastExpr(E); case CK_LValueBitCast: - this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; + this->CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << Info.Ctx.getLangOpts().CPlusPlus; if (!Visit(E->getSubExpr())) return false; Result.Designator.setInvalid(); @@ -8110,6 +8544,53 @@ public: }; } // end anonymous namespace +/// Get an lvalue to a field of a lambda's closure type. +static bool HandleLambdaCapture(EvalInfo &Info, const Expr *E, LValue &Result, + const CXXMethodDecl *MD, const FieldDecl *FD, + bool LValueToRValueConversion) { + // Static lambda function call operators can't have captures. We already + // diagnosed this, so bail out here. + if (MD->isStatic()) { + assert(Info.CurrentCall->This == nullptr && + "This should not be set for a static call operator"); + return false; + } + + // Start with 'Result' referring to the complete closure object... + if (MD->isExplicitObjectMemberFunction()) { + // Self may be passed by reference or by value. + const ParmVarDecl *Self = MD->getParamDecl(0); + if (Self->getType()->isReferenceType()) { + APValue *RefValue = Info.getParamSlot(Info.CurrentCall->Arguments, Self); + Result.setFrom(Info.Ctx, *RefValue); + } else { + const ParmVarDecl *VD = Info.CurrentCall->Arguments.getOrigParam(Self); + CallStackFrame *Frame = + Info.getCallFrameAndDepth(Info.CurrentCall->Arguments.CallIndex) + .first; + unsigned Version = Info.CurrentCall->Arguments.Version; + Result.set({VD, Frame->Index, Version}); + } + } else + Result = *Info.CurrentCall->This; + + // ... then update it to refer to the field of the closure object + // that represents the capture. + if (!HandleLValueMember(Info, E, Result, FD)) + return false; + + // And if the field is of reference type (or if we captured '*this' by + // reference), update 'Result' to refer to what + // the field refers to. + if (LValueToRValueConversion) { + APValue RVal; + if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result, RVal)) + return false; + Result.setFrom(Info.Ctx, RVal); + } + return true; +} + /// Evaluate an expression as an lvalue. 
This can be legitimately called on /// expressions which are not glvalues, in three cases: /// * function designators in C, and @@ -8119,13 +8600,14 @@ static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info, bool InvalidBaseOK) { assert(!E->isValueDependent()); assert(E->isGLValue() || E->getType()->isFunctionType() || - E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E)); + E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E->IgnoreParens())); return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(E); } bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) { const NamedDecl *D = E->getDecl(); - if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl>(D)) + if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl, + UnnamedGlobalConstantDecl>(D)) return Success(cast<ValueDecl>(D)); if (const VarDecl *VD = dyn_cast<VarDecl>(D)) return VisitVarDecl(E, VD); @@ -8152,22 +8634,9 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) { return false; if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) { - // Start with 'Result' referring to the complete closure object... - Result = *Info.CurrentCall->This; - // ... then update it to refer to the field of the closure object - // that represents the capture. - if (!HandleLValueMember(Info, E, Result, FD)) - return false; - // And if the field is of reference type, update 'Result' to refer to what - // the field refers to. - if (FD->getType()->isReferenceType()) { - APValue RVal; - if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result, - RVal)) - return false; - Result.setFrom(Info.Ctx, RVal); - } - return true; + const auto *MD = cast<CXXMethodDecl>(Info.CurrentCall->Callee); + return HandleLambdaCapture(Info, E, Result, MD, FD, + FD->getType()->isReferenceType()); } } @@ -8226,6 +8695,26 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) { return Success(*V, E); } +bool LValueExprEvaluator::VisitCallExpr(const CallExpr *E) { + if (!IsConstantEvaluatedBuiltinCall(E)) + return ExprEvaluatorBaseTy::VisitCallExpr(E); + + switch (E->getBuiltinCallee()) { + default: + return false; + case Builtin::BIas_const: + case Builtin::BIforward: + case Builtin::BIforward_like: + case Builtin::BImove: + case Builtin::BImove_if_noexcept: + if (cast<FunctionDecl>(E->getCalleeDecl())->isConstexpr()) + return Visit(E->getArg(0)); + break; + } + + return ExprEvaluatorBaseTy::VisitCallExpr(E); +} + bool LValueExprEvaluator::VisitMaterializeTemporaryExpr( const MaterializeTemporaryExpr *E) { // Walk through the expression to find the materialized temporary itself. @@ -8235,8 +8724,8 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr( E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); // If we passed any comma operators, evaluate their LHSs. - for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I) - if (!EvaluateIgnoredValue(Info, CommaLHSs[I])) + for (const Expr *E : CommaLHSs) + if (!EvaluateIgnoredValue(Info, E)) return false; // A materialized temporary with static storage duration can appear within the @@ -8244,13 +8733,15 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr( // value for use outside this evaluation. APValue *Value; if (E->getStorageDuration() == SD_Static) { + if (Info.EvalMode == EvalInfo::EM_ConstantFold) + return false; // FIXME: What about SD_Thread? 
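(Illustrative aside, not part of this diff.) LValueExprEvaluator::VisitCallExpr above treats std::as_const, std::forward, std::forward_like, std::move and std::move_if_noexcept as constant-evaluated builtins: when the library declares them constexpr, the evaluator forwards directly to the argument's lvalue rather than entering the library definition. Such calls were already usable in constant expressions; a small illustration, assuming a standard library whose declarations map to the builtin IDs listed above:

    #include <utility>

    constexpr int x = 5;
    static_assert(std::move(x) == 5);
    static_assert(&std::as_const(x) == &x);
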
Value = E->getOrCreateValue(true); *Value = APValue(); Result.set(E); } else { Value = &Info.CurrentCall->createTemporary( - E, E->getType(), + E, Inner->getType(), E->getStorageDuration() == SD_FullExpression ? ScopeKind::FullExpression : ScopeKind::Block, Result); @@ -8320,7 +8811,7 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) { if (!Visit(E->getExprOperand())) return false; - Optional<DynamicType> DynType = + std::optional<DynamicType> DynType = ComputeDynamicType(Info, E, Result, AK_TypeId); if (!DynType) return false; @@ -8357,7 +8848,8 @@ bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) { bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) { // FIXME: Deal with vectors as array subscript bases. - if (E->getBase()->getType()->isVectorType()) + if (E->getBase()->getType()->isVectorType() || + E->getBase()->getType()->isSveVLSBuiltinType()) return Error(E); APSInt Index; @@ -8455,7 +8947,7 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) { return false; if (Info.getLangOpts().CPlusPlus20 && - !HandleUnionActiveMemberChange(Info, E->getLHS(), Result)) + !MaybeHandleUnionActiveMemberChange(Info, E->getLHS(), Result)) return false; return handleAssignment(this->Info, E, Result, E->getLHS()->getType(), @@ -8490,7 +8982,7 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx, Into = ExprResult.Val.getInt(); if (Into.isNegative() || !Into.isIntN(BitsInSizeT)) return false; - Into = Into.zextOrSelf(BitsInSizeT); + Into = Into.zext(BitsInSizeT); return true; }; @@ -8549,7 +9041,7 @@ static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base, return false; const Expr *Init = VD->getAnyInitializer(); - if (!Init) + if (!Init || Init->getType().isNull()) return false; const Expr *E = Init->IgnoreParens(); @@ -8622,42 +9114,46 @@ public: return Error(E); } bool VisitCXXThisExpr(const CXXThisExpr *E) { - // Can't look at 'this' when checking a potential constant expression. - if (Info.checkingPotentialConstantExpression()) - return false; - if (!Info.CurrentCall->This) { + auto DiagnoseInvalidUseOfThis = [&] { if (Info.getLangOpts().CPlusPlus11) Info.FFDiag(E, diag::note_constexpr_this) << E->isImplicit(); else Info.FFDiag(E); + }; + + // Can't look at 'this' when checking a potential constant expression. + if (Info.checkingPotentialConstantExpression()) return false; - } - Result = *Info.CurrentCall->This; - // If we are inside a lambda's call operator, the 'this' expression refers - // to the enclosing '*this' object (either by value or reference) which is - // either copied into the closure object's field that represents the '*this' - // or refers to '*this'. - if (isLambdaCallOperator(Info.CurrentCall->Callee)) { - // Ensure we actually have captured 'this'. (an error will have - // been previously reported if not). - if (!Info.CurrentCall->LambdaThisCaptureField) - return false; - // Update 'Result' to refer to the data member/field of the closure object - // that represents the '*this' capture. - if (!HandleLValueMember(Info, E, Result, - Info.CurrentCall->LambdaThisCaptureField)) + bool IsExplicitLambda = + isLambdaCallWithExplicitObjectParameter(Info.CurrentCall->Callee); + if (!IsExplicitLambda) { + if (!Info.CurrentCall->This) { + DiagnoseInvalidUseOfThis(); return false; - // If we captured '*this' by reference, replace the field with its referent. 
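(Illustrative aside, not part of this diff.) Captured variables and a captured '*this' are read through HandleLambdaCapture, which resolves them to the corresponding field of the closure object (or, for explicit-object call operators, through the self parameter). A small sketch using C++17 constexpr lambdas:

    struct Widget {
      int v = 7;
      constexpr int get() const {
        return [this] { return v; }();   // 'v' is read through the captured 'this'
      }
    };
    static_assert(Widget{}.get() == 7);
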
- if (Info.CurrentCall->LambdaThisCaptureField->getType() - ->isPointerType()) { - APValue RVal; - if (!handleLValueToRValueConversion(Info, E, E->getType(), Result, - RVal)) + } + + Result = *Info.CurrentCall->This; + } + + if (isLambdaCallOperator(Info.CurrentCall->Callee)) { + // Ensure we actually have captured 'this'. If something was wrong with + // 'this' capture, the error would have been previously reported. + // Otherwise we can be inside of a default initialization of an object + // declared by lambda's body, so no need to return false. + if (!Info.CurrentCall->LambdaThisCaptureField) { + if (IsExplicitLambda && !Info.CurrentCall->This) { + DiagnoseInvalidUseOfThis(); return false; + } - Result.setFrom(Info.Ctx, RVal); + return true; } + + const auto *MD = cast<CXXMethodDecl>(Info.CurrentCall->Callee); + return HandleLambdaCapture( + Info, E, Result, MD, Info.CurrentCall->LambdaThisCaptureField, + Info.CurrentCall->LambdaThisCaptureField->getType()->isPointerType()); } return true; } @@ -8665,26 +9161,29 @@ public: bool VisitCXXNewExpr(const CXXNewExpr *E); bool VisitSourceLocExpr(const SourceLocExpr *E) { - assert(E->isStringType() && "SourceLocExpr isn't a pointer type?"); + assert(!E->isIntType() && "SourceLocExpr isn't a pointer type?"); APValue LValResult = E->EvaluateInContext( Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr()); Result.setFrom(Info.Ctx, LValResult); return true; } + bool VisitEmbedExpr(const EmbedExpr *E) { + llvm::report_fatal_error("Not yet implemented for ExprConstant.cpp"); + return true; + } + bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E) { std::string ResultStr = E->ComputeName(Info.Ctx); - Info.Ctx.SYCLUniqueStableNameEvaluatedValues[E] = ResultStr; - QualType CharTy = Info.Ctx.CharTy.withConst(); APInt Size(Info.Ctx.getTypeSize(Info.Ctx.getSizeType()), ResultStr.size() + 1); - QualType ArrayTy = Info.Ctx.getConstantArrayType(CharTy, Size, nullptr, - ArrayType::Normal, 0); + QualType ArrayTy = Info.Ctx.getConstantArrayType( + CharTy, Size, nullptr, ArraySizeModifier::Normal, 0); StringLiteral *SL = - StringLiteral::Create(Info.Ctx, ResultStr, StringLiteral::Ascii, + StringLiteral::Create(Info.Ctx, ResultStr, StringLiteralKind::Ordinary, /*Pascal*/ false, ArrayTy, E->getLocation()); evaluateLValue(SL, Result); @@ -8732,6 +9231,22 @@ bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) { return evaluateLValue(E->getSubExpr(), Result); } +// Is the provided decl 'std::source_location::current'? +static bool IsDeclSourceLocationCurrent(const FunctionDecl *FD) { + if (!FD) + return false; + const IdentifierInfo *FnII = FD->getIdentifier(); + if (!FnII || !FnII->isStr("current")) + return false; + + const auto *RD = dyn_cast<RecordDecl>(FD->getParent()); + if (!RD) + return false; + + const IdentifierInfo *ClassII = RD->getIdentifier(); + return RD->isInStdNamespace() && ClassII && ClassII->isStr("source_location"); +} + bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) { const Expr *SubExpr = E->getSubExpr(); @@ -8749,21 +9264,42 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) { // permitted in constant expressions in C++11. Bitcasts from cv void* are // also static_casts, but we disallow them as a resolution to DR1312. 
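(Illustrative aside, not part of this diff.) VisitSourceLocExpr evaluates a source-location expression in its context, and IsDeclSourceLocationCurrent identifies std::source_location::current for the cast workaround that follows. The user-visible effect, assuming C++20 and a <source_location> header:

    #include <source_location>

    constexpr std::source_location here = std::source_location::current();
    static_assert(here.line() > 0);
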
if (!E->getType()->isVoidPointerType()) { - if (!Result.InvalidBase && !Result.Designator.Invalid && - !Result.IsNullPtr && - Info.Ctx.hasSameUnqualifiedType(Result.Designator.getType(Info.Ctx), - E->getType()->getPointeeType()) && - Info.getStdAllocatorCaller("allocate")) { - // Inside a call to std::allocator::allocate and friends, we permit - // casting from void* back to cv1 T* for a pointer that points to a - // cv2 T. + // In some circumstances, we permit casting from void* to cv1 T*, when the + // actual pointee object is actually a cv2 T. + bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid && + !Result.IsNullPtr; + bool VoidPtrCastMaybeOK = + Result.IsNullPtr || + (HasValidResult && + Info.Ctx.hasSimilarType(Result.Designator.getType(Info.Ctx), + E->getType()->getPointeeType())); + // 1. We'll allow it in std::allocator::allocate, and anything which that + // calls. + // 2. HACK 2022-03-28: Work around an issue with libstdc++'s + // <source_location> header. Fixed in GCC 12 and later (2022-04-??). + // We'll allow it in the body of std::source_location::current. GCC's + // implementation had a parameter of type `void*`, and casts from + // that back to `const __impl*` in its body. + if (VoidPtrCastMaybeOK && + (Info.getStdAllocatorCaller("allocate") || + IsDeclSourceLocationCurrent(Info.CurrentCall->Callee) || + Info.getLangOpts().CPlusPlus26)) { + // Permitted. } else { - Result.Designator.setInvalid(); - if (SubExpr->getType()->isVoidPointerType()) + if (SubExpr->getType()->isVoidPointerType() && + Info.getLangOpts().CPlusPlus) { + if (HasValidResult) + CCEDiag(E, diag::note_constexpr_invalid_void_star_cast) + << SubExpr->getType() << Info.getLangOpts().CPlusPlus26 + << Result.Designator.getType(Info.Ctx).getCanonicalType() + << E->getType()->getPointeeType(); + else + CCEDiag(E, diag::note_constexpr_invalid_cast) + << 3 << SubExpr->getType(); + } else CCEDiag(E, diag::note_constexpr_invalid_cast) - << 3 << SubExpr->getType(); - else - CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; + << 2 << Info.Ctx.getLangOpts().CPlusPlus; + Result.Designator.setInvalid(); } } if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr) @@ -8800,7 +9336,8 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) { return ZeroInitialization(E); case CK_IntegralToPointer: { - CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; + CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << Info.Ctx.getLangOpts().CPlusPlus; APValue Value; if (!EvaluateIntegerOrLValue(SubExpr, Value, Info)) @@ -8816,6 +9353,13 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) { Result.IsNullPtr = false; return true; } else { + // In rare instances, the value isn't an lvalue. + // For example, when the value is the difference between the addresses of + // two labels. We reject that as a constant expression because we can't + // compute a valid offset to convert into a pointer. + if (!Value.isLValue()) + return false; + // Cast is of an lvalue, no need to change value. Result.setFrom(Info.Ctx, Value); return true; @@ -8867,8 +9411,7 @@ static CharUnits GetAlignOfType(EvalInfo &Info, QualType T, // C++ [expr.alignof]p3: // When alignof is applied to a reference type, the result is the // alignment of the referenced type. 
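(Illustrative aside, not part of this diff.) The carve-out above permits casting the void* produced inside std::allocator<T>::allocate (and, under C++26, similar casts more generally) back to a pointer to the allocated type, which is what makes constexpr dynamic containers workable. For instance, assuming a C++20 standard library with constexpr std::vector:

    #include <vector>

    constexpr int sum123() {
      std::vector<int> v = {1, 2, 3};
      int s = 0;
      for (int i : v)
        s += i;
      return s;
    }
    static_assert(sum123() == 6);
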
- if (const ReferenceType *Ref = T->getAs<ReferenceType>()) - T = Ref->getPointeeType(); + T = T.getNonReferenceType(); if (T.getQualifiers().hasUnaligned()) return CharUnits::One(); @@ -8961,13 +9504,9 @@ bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) { } bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) { - if (IsStringLiteralCall(E)) - return Success(E); - - if (unsigned BuiltinOp = E->getBuiltinCallee()) - return VisitBuiltinCallExpr(E, BuiltinOp); - - return visitNonBuiltinCallExpr(E); + if (!IsConstantEvaluatedBuiltinCall(E)) + return visitNonBuiltinCallExpr(E); + return VisitBuiltinCallExpr(E, E->getBuiltinCallee()); } // Determine if T is a character type for which we guarantee that @@ -8978,7 +9517,12 @@ static bool isOneByteCharacterType(QualType T) { bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp) { + if (IsNoOpCall(E)) + return Success(E); + switch (BuiltinOp) { + case Builtin::BIaddressof: + case Builtin::BI__addressof: case Builtin::BI__builtin_addressof: return evaluateLValue(E->getArg(0), Result); case Builtin::BI__builtin_assume_aligned: { @@ -9082,11 +9626,11 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, case Builtin::BIwmemchr: if (Info.getLangOpts().CPlusPlus11) Info.CCEDiag(E, diag::note_constexpr_invalid_function) - << /*isConstexpr*/0 << /*isConstructor*/0 - << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'"); + << /*isConstexpr*/ 0 << /*isConstructor*/ 0 + << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str(); else Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr); - LLVM_FALLTHROUGH; + [[fallthrough]]; case Builtin::BI__builtin_strchr: case Builtin::BI__builtin_wcschr: case Builtin::BI__builtin_memchr: @@ -9105,7 +9649,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, APSInt N; if (!EvaluateInteger(E->getArg(2), N, Info)) return false; - MaxLength = N.getExtValue(); + MaxLength = N.getZExtValue(); } // We cannot find the value if there are no candidates to match against. if (MaxLength == 0u) @@ -9128,7 +9672,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, // FIXME: We can compare the bytes in the correct order. if (IsRawByte && !isOneByteCharacterType(CharTy)) { Info.FFDiag(E, diag::note_constexpr_memchr_unsupported) - << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'") + << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str() << CharTy; return false; } @@ -9147,7 +9691,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, Desired)) return ZeroInitialization(E); StopAtNull = true; - LLVM_FALLTHROUGH; + [[fallthrough]]; case Builtin::BImemchr: case Builtin::BI__builtin_memchr: case Builtin::BI__builtin_char_memchr: @@ -9160,7 +9704,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, case Builtin::BIwcschr: case Builtin::BI__builtin_wcschr: StopAtNull = true; - LLVM_FALLTHROUGH; + [[fallthrough]]; case Builtin::BIwmemchr: case Builtin::BI__builtin_wmemchr: // wcschr and wmemchr are given a wchar_t to look for. Just use it. 
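(Illustrative aside, not part of this diff.) The builtin handling above constant-folds the character-search builtins; the plain library names are only folded together with the 'non-constexpr function' note emitted above. Sketch:

    constexpr const char *greeting = "hello";
    static_assert(__builtin_strchr(greeting, 'l') == greeting + 2);
    static_assert(__builtin_strchr(greeting, 'z') == nullptr);
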
@@ -9190,11 +9734,11 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, case Builtin::BIwmemmove: if (Info.getLangOpts().CPlusPlus11) Info.CCEDiag(E, diag::note_constexpr_invalid_function) - << /*isConstexpr*/0 << /*isConstructor*/0 - << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'"); + << /*isConstexpr*/ 0 << /*isConstructor*/ 0 + << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str(); else Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr); - LLVM_FALLTHROUGH; + [[fallthrough]]; case Builtin::BI__builtin_memcpy: case Builtin::BI__builtin_memmove: case Builtin::BI__builtin_wmemcpy: @@ -9262,6 +9806,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, // Figure out how many T's we're copying. uint64_t TSize = Info.Ctx.getTypeSizeInChars(T).getQuantity(); + if (TSize == 0) + return false; if (!WChar) { uint64_t Remainder; llvm::APInt OrigN = N; @@ -9330,10 +9876,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, } default: - break; + return false; } - - return visitNonBuiltinCallExpr(E); } static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This, @@ -9396,7 +9940,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) { bool ValueInit = false; QualType AllocType = E->getAllocatedType(); - if (Optional<const Expr*> ArraySize = E->getArraySize()) { + if (std::optional<const Expr *> ArraySize = E->getArraySize()) { const Expr *Stripped = *ArraySize; for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Stripped); Stripped = ICE->getSubExpr()) @@ -9423,14 +9967,12 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) { // -- its value is such that the size of the allocated object would // exceed the implementation-defined limit - if (ConstantArrayType::getNumAddressingBits(Info.Ctx, AllocType, - ArrayBound) > - ConstantArrayType::getMaxSizeBits(Info.Ctx)) { + if (!Info.CheckArraySize(ArraySize.value()->getExprLoc(), + ConstantArrayType::getNumAddressingBits( + Info.Ctx, AllocType, ArrayBound), + ArrayBound.getZExtValue(), /*Diag=*/!IsNothrow)) { if (IsNothrow) return ZeroInitialization(E); - - Info.FFDiag(*ArraySize, diag::note_constexpr_new_too_large) - << ArrayBound << (*ArraySize)->getSourceRange(); return false; } @@ -9449,9 +9991,9 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) { assert(CAT && "unexpected type for array initializer"); unsigned Bits = - std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth()); - llvm::APInt InitBound = CAT->getSize().zextOrSelf(Bits); - llvm::APInt AllocBound = ArrayBound.zextOrSelf(Bits); + std::max(CAT->getSizeBitWidth(), ArrayBound.getBitWidth()); + llvm::APInt InitBound = CAT->getSize().zext(Bits); + llvm::APInt AllocBound = ArrayBound.zext(Bits); if (InitBound.ugt(AllocBound)) { if (IsNothrow) return ZeroInitialization(E); @@ -9470,7 +10012,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) { } AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr, - ArrayType::Normal, 0); + ArraySizeModifier::Normal, 0); } else { assert(!AllocType->isArrayType() && "array allocation with non-array new"); @@ -9542,7 +10084,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) { } else if (Init) { if (!EvaluateInPlace(*Val, Info, Result, Init)) return false; - } else if (!getDefaultInitValue(AllocType, *Val)) { + } else if (!handleDefaultInitValue(AllocType, *Val)) { return false; } @@ -9678,6 +10220,9 @@ namespace { bool VisitCXXConstructExpr(const CXXConstructExpr *E, 
QualType T); bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E); bool VisitBinCmp(const BinaryOperator *E); + bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E); + bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit, + ArrayRef<Expr *> Args); }; } @@ -9715,7 +10260,7 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E, for (const auto *I : RD->fields()) { // -- if T is a reference type, no initialization is performed. - if (I->isUnnamedBitfield() || I->getType()->isReferenceType()) + if (I->isUnnamedBitField() || I->getType()->isReferenceType()) continue; LValue Subobject = This; @@ -9738,7 +10283,7 @@ bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) { // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the // object's first non-static named data member is zero-initialized RecordDecl::field_iterator I = RD->field_begin(); - while (I != RD->field_end() && (*I)->isUnnamedBitfield()) + while (I != RD->field_end() && (*I)->isUnnamedBitField()) ++I; if (I == RD->field_end()) { Result = APValue((const FieldDecl*)nullptr); @@ -9796,8 +10341,13 @@ bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) { bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) { if (E->isTransparent()) return Visit(E->getInit(0)); + return VisitCXXParenListOrInitListExpr(E, E->inits()); +} - const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl(); +bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr( + const Expr *ExprToVisit, ArrayRef<Expr *> Args) { + const RecordDecl *RD = + ExprToVisit->getType()->castAs<RecordType>()->getDecl(); if (RD->isInvalidDecl()) return false; const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD); auto *CXXRD = dyn_cast<CXXRecordDecl>(RD); @@ -9808,7 +10358,16 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) { CXXRD && CXXRD->getNumBases()); if (RD->isUnion()) { - const FieldDecl *Field = E->getInitializedFieldInUnion(); + const FieldDecl *Field; + if (auto *ILE = dyn_cast<InitListExpr>(ExprToVisit)) { + Field = ILE->getInitializedFieldInUnion(); + } else if (auto *PLIE = dyn_cast<CXXParenListInitExpr>(ExprToVisit)) { + Field = PLIE->getInitializedFieldInUnion(); + } else { + llvm_unreachable( + "Expression is neither an init list nor a C++ paren list"); + } + Result = APValue(Field); if (!Field) return true; @@ -9819,7 +10378,7 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) { // Is this difference ever observable for initializer lists which // we don't build? ImplicitValueInitExpr VIE(Field->getType()); - const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE; + const Expr *InitExpr = Args.empty() ? &VIE : Args[0]; LValue Subobject = This; if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout)) @@ -9848,8 +10407,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) { // Initialize base classes. 
if (CXXRD && CXXRD->getNumBases()) { for (const auto &Base : CXXRD->bases()) { - assert(ElementNo < E->getNumInits() && "missing init for base class"); - const Expr *Init = E->getInit(ElementNo); + assert(ElementNo < Args.size() && "missing init for base class"); + const Expr *Init = Args[ElementNo]; LValue Subobject = This; if (!HandleLValueBase(Info, Init, Subobject, CXXRD, &Base)) @@ -9871,23 +10430,34 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) { for (const auto *Field : RD->fields()) { // Anonymous bit-fields are not considered members of the class for // purposes of aggregate initialization. - if (Field->isUnnamedBitfield()) + if (Field->isUnnamedBitField()) continue; LValue Subobject = This; - bool HaveInit = ElementNo < E->getNumInits(); + bool HaveInit = ElementNo < Args.size(); // FIXME: Diagnostics here should point to the end of the initializer // list, not the start. - if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E, + if (!HandleLValueMember(Info, HaveInit ? Args[ElementNo] : ExprToVisit, Subobject, Field, &Layout)) return false; // Perform an implicit value-initialization for members beyond the end of // the initializer list. ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType()); - const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE; + const Expr *Init = HaveInit ? Args[ElementNo++] : &VIE; + + if (Field->getType()->isIncompleteArrayType()) { + if (auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType())) { + if (!CAT->isZeroSize()) { + // Bail out for now. This might sort of "work", but the rest of the + // code isn't really prepared to handle it. + Info.FFDiag(Init, diag::note_constexpr_unsupported_flexible_array); + return false; + } + } + } // Temporarily override This, in case there's a CXXDefaultInitExpr in here. ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This, @@ -9924,7 +10494,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, if (ZeroInit) return ZeroInitialization(E, T); - return getDefaultInitValue(T, Result); + return handleDefaultInitValue(T, Result); } const FunctionDecl *Definition = nullptr; @@ -9951,7 +10521,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, if (ZeroInit && !ZeroInitialization(E, T)) return false; - auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs()); + auto Args = llvm::ArrayRef(E->getArgs(), E->getNumArgs()); return HandleConstructorCall(E, This, Args, cast<CXXConstructorDecl>(Definition), Info, Result); @@ -9988,51 +10558,42 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr( if (!EvaluateLValue(E->getSubExpr(), Array, Info)) return false; + assert(ArrayType && "unexpected type for array initializer"); + // Get a pointer to the first element of the array. Array.addArray(Info, E, ArrayType); - auto InvalidType = [&] { - Info.FFDiag(E, diag::note_constexpr_unsupported_layout) - << E->getType(); - return false; - }; - - // FIXME: Perform the checks on the field types in SemaInit. - RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl(); - RecordDecl::field_iterator Field = Record->field_begin(); - if (Field == Record->field_end()) - return InvalidType(); - - // Start pointer. - if (!Field->getType()->isPointerType() || - !Info.Ctx.hasSameType(Field->getType()->getPointeeType(), - ArrayType->getElementType())) - return InvalidType(); - // FIXME: What if the initializer_list type has base classes, etc? 
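The record-level VisitCXXParenListOrInitListExpr path added above also serves C++20 parenthesized aggregate initialization. A minimal sketch, assuming -std=c++20 and a Clang that carries this change; Agg is an invented aggregate:

struct Agg { int a; int b = 7; };
constexpr Agg x(1, 2);   // CXXParenListInitExpr; both members from the list
constexpr Agg y(1);      // trailing member falls back to its default initializer
static_assert(x.b == 2 && y.b == 7, "");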
Result = APValue(APValue::UninitStruct(), 0, 2); Array.moveInto(Result.getStructField(0)); - if (++Field == Record->field_end()) - return InvalidType(); - - if (Field->getType()->isPointerType() && - Info.Ctx.hasSameType(Field->getType()->getPointeeType(), - ArrayType->getElementType())) { + RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl(); + RecordDecl::field_iterator Field = Record->field_begin(); + assert(Field != Record->field_end() && + Info.Ctx.hasSameType(Field->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list first field to be const E *"); + ++Field; + assert(Field != Record->field_end() && + "Expected std::initializer_list to have two fields"); + + if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType())) { + // Length. + Result.getStructField(1) = APValue(APSInt(ArrayType->getSize())); + } else { // End pointer. + assert(Info.Ctx.hasSameType(Field->getType()->getPointeeType(), + ArrayType->getElementType()) && + "Expected std::initializer_list second field to be const E *"); if (!HandleLValueArrayAdjustment(Info, E, Array, ArrayType->getElementType(), - ArrayType->getSize().getZExtValue())) + ArrayType->getZExtSize())) return false; Array.moveInto(Result.getStructField(1)); - } else if (Info.Ctx.hasSameType(Field->getType(), Info.Ctx.getSizeType())) - // Length. - Result.getStructField(1) = APValue(APSInt(ArrayType->getSize())); - else - return InvalidType(); + } - if (++Field != Record->field_end()) - return InvalidType(); + assert(++Field == Record->field_end() && + "Expected std::initializer_list to only have two fields"); return true; } @@ -10054,7 +10615,6 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) { // Iterate through all the lambda's closure object's fields and initialize // them. auto *CaptureInitIt = E->capture_init_begin(); - const LambdaCapture *CaptureIt = ClosureClass->captures_begin(); bool Success = true; const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(ClosureClass); for (const auto *Field : ClosureClass->fields()) { @@ -10078,7 +10638,6 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) { return false; Success = false; } - ++CaptureIt; } return Success; } @@ -10178,8 +10737,12 @@ namespace { bool VisitInitListExpr(const InitListExpr *E); bool VisitUnaryImag(const UnaryOperator *E); bool VisitBinaryOperator(const BinaryOperator *E); - // FIXME: Missing: unary -, unary ~, conditional operator (for GNU - // conditional select), shufflevector, ExtVectorElementExpr + bool VisitUnaryOperator(const UnaryOperator *E); + bool VisitConvertVectorExpr(const ConvertVectorExpr *E); + bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E); + + // FIXME: Missing: conditional operator (for GNU + // conditional select), ExtVectorElementExpr }; } // end anonymous namespace @@ -10218,41 +10781,22 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) { return Success(Elts, E); } case CK_BitCast: { - // Evaluate the operand into an APInt we can extract from. - llvm::APInt SValInt; - if (!EvalAndBitcastToAPInt(Info, SE, SValInt)) + APValue SVal; + if (!Evaluate(SVal, Info, SE)) + return false; + + if (!SVal.isInt() && !SVal.isFloat() && !SVal.isVector()) { + // Give up if the input isn't an int, float, or vector. For example, we + // reject "(v4i16)(intptr_t)&a". 
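Because the CK_BitCast case above now funnels into handleRValueToRValueBitCast, a same-size vector reinterpretation can be evaluated as a constant. An illustrative sketch, assuming the vector_size extension and a build that includes this change; the typedef names are invented:

typedef float    v4f __attribute__((vector_size(16)));
typedef unsigned v4u __attribute__((vector_size(16)));
constexpr v4f F = {1.0f, 0.0f, 0.0f, 0.0f};
constexpr v4u U = (v4u)F;   // vector-to-vector bitcast, folded bit-for-bit
// The first lane of U then carries the IEEE-754 encoding of 1.0f (0x3f800000).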
+ Info.FFDiag(E, diag::note_constexpr_invalid_cast) + << 2 << Info.Ctx.getLangOpts().CPlusPlus; return false; - // Extract the elements - QualType EltTy = VTy->getElementType(); - unsigned EltSize = Info.Ctx.getTypeSize(EltTy); - bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian(); - SmallVector<APValue, 4> Elts; - if (EltTy->isRealFloatingType()) { - const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy); - unsigned FloatEltSize = EltSize; - if (&Sem == &APFloat::x87DoubleExtended()) - FloatEltSize = 80; - for (unsigned i = 0; i < NElts; i++) { - llvm::APInt Elt; - if (BigEndian) - Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize); - else - Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize); - Elts.push_back(APValue(APFloat(Sem, Elt))); - } - } else if (EltTy->isIntegerType()) { - for (unsigned i = 0; i < NElts; i++) { - llvm::APInt Elt; - if (BigEndian) - Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize); - else - Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize); - Elts.push_back(APValue(APSInt(Elt, !EltTy->isSignedIntegerType()))); - } - } else { - return Error(E); } - return Success(Elts, E); + + if (!handleRValueToRValueBitCast(Info, Result, SVal, E)) + return false; + + return true; } default: return ExprEvaluatorBaseTy::VisitCastExpr(E); @@ -10363,6 +10907,208 @@ bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { return Success(LHSValue, E); } +static std::optional<APValue> handleVectorUnaryOperator(ASTContext &Ctx, + QualType ResultTy, + UnaryOperatorKind Op, + APValue Elt) { + switch (Op) { + case UO_Plus: + // Nothing to do here. + return Elt; + case UO_Minus: + if (Elt.getKind() == APValue::Int) { + Elt.getInt().negate(); + } else { + assert(Elt.getKind() == APValue::Float && + "Vector can only be int or float type"); + Elt.getFloat().changeSign(); + } + return Elt; + case UO_Not: + // This is only valid for integral types anyway, so we don't have to handle + // float here. + assert(Elt.getKind() == APValue::Int && + "Vector operator ~ can only be int"); + Elt.getInt().flipAllBits(); + return Elt; + case UO_LNot: { + if (Elt.getKind() == APValue::Int) { + Elt.getInt() = !Elt.getInt(); + // operator ! on vectors returns -1 for 'truth', so negate it. + Elt.getInt().negate(); + return Elt; + } + assert(Elt.getKind() == APValue::Float && + "Vector can only be int or float type"); + // Float types result in an int of the same size, but -1 for true, or 0 for + // false. + APSInt EltResult{Ctx.getIntWidth(ResultTy), + ResultTy->isUnsignedIntegerType()}; + if (Elt.getFloat().isZero()) + EltResult.setAllBits(); + else + EltResult.clearAllBits(); + + return APValue{EltResult}; + } + default: + // FIXME: Implement the rest of the unary operators. + return std::nullopt; + } +} + +bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { + Expr *SubExpr = E->getSubExpr(); + const auto *VD = SubExpr->getType()->castAs<VectorType>(); + // This result element type differs in the case of negating a floating point + // vector, since the result type is the a vector of the equivilant sized + // integer. + const QualType ResultEltTy = VD->getElementType(); + UnaryOperatorKind Op = E->getOpcode(); + + APValue SubExprValue; + if (!Evaluate(SubExprValue, Info, SubExpr)) + return false; + + // FIXME: This vector evaluator someday needs to be changed to be LValue + // aware/keep LValue information around, rather than dealing with just vector + // types directly. 
Until then, we cannot handle cases where the operand to + // these unary operators is an LValue. The only case I've been able to see + // cause this is operator++ assigning to a member expression (only valid in + // altivec compilations) in C mode, so this shouldn't limit us too much. + if (SubExprValue.isLValue()) + return false; + + assert(SubExprValue.getVectorLength() == VD->getNumElements() && + "Vector length doesn't match type?"); + + SmallVector<APValue, 4> ResultElements; + for (unsigned EltNum = 0; EltNum < VD->getNumElements(); ++EltNum) { + std::optional<APValue> Elt = handleVectorUnaryOperator( + Info.Ctx, ResultEltTy, Op, SubExprValue.getVectorElt(EltNum)); + if (!Elt) + return false; + ResultElements.push_back(*Elt); + } + return Success(APValue(ResultElements.data(), ResultElements.size()), E); +} + +static bool handleVectorElementCast(EvalInfo &Info, const FPOptions FPO, + const Expr *E, QualType SourceTy, + QualType DestTy, APValue const &Original, + APValue &Result) { + if (SourceTy->isIntegerType()) { + if (DestTy->isRealFloatingType()) { + Result = APValue(APFloat(0.0)); + return HandleIntToFloatCast(Info, E, FPO, SourceTy, Original.getInt(), + DestTy, Result.getFloat()); + } + if (DestTy->isIntegerType()) { + Result = APValue( + HandleIntToIntCast(Info, E, DestTy, SourceTy, Original.getInt())); + return true; + } + } else if (SourceTy->isRealFloatingType()) { + if (DestTy->isRealFloatingType()) { + Result = Original; + return HandleFloatToFloatCast(Info, E, SourceTy, DestTy, + Result.getFloat()); + } + if (DestTy->isIntegerType()) { + Result = APValue(APSInt()); + return HandleFloatToIntCast(Info, E, SourceTy, Original.getFloat(), + DestTy, Result.getInt()); + } + } + + Info.FFDiag(E, diag::err_convertvector_constexpr_unsupported_vector_cast) + << SourceTy << DestTy; + return false; +} + +bool VectorExprEvaluator::VisitConvertVectorExpr(const ConvertVectorExpr *E) { + APValue Source; + QualType SourceVecType = E->getSrcExpr()->getType(); + if (!EvaluateAsRValue(Info, E->getSrcExpr(), Source)) + return false; + + QualType DestTy = E->getType()->castAs<VectorType>()->getElementType(); + QualType SourceTy = SourceVecType->castAs<VectorType>()->getElementType(); + + const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()); + + auto SourceLen = Source.getVectorLength(); + SmallVector<APValue, 4> ResultElements; + ResultElements.reserve(SourceLen); + for (unsigned EltNum = 0; EltNum < SourceLen; ++EltNum) { + APValue Elt; + if (!handleVectorElementCast(Info, FPO, E, SourceTy, DestTy, + Source.getVectorElt(EltNum), Elt)) + return false; + ResultElements.push_back(std::move(Elt)); + } + + return Success(APValue(ResultElements.data(), ResultElements.size()), E); +} + +static bool handleVectorShuffle(EvalInfo &Info, const ShuffleVectorExpr *E, + QualType ElemType, APValue const &VecVal1, + APValue const &VecVal2, unsigned EltNum, + APValue &Result) { + unsigned const TotalElementsInInputVector1 = VecVal1.getVectorLength(); + unsigned const TotalElementsInInputVector2 = VecVal2.getVectorLength(); + + APSInt IndexVal = E->getShuffleMaskIdx(Info.Ctx, EltNum); + int64_t index = IndexVal.getExtValue(); + // The spec says that -1 should be treated as undef for optimizations, + // but in constexpr we'd have to produce an APValue::Indeterminate, + // which is prohibited from being a top-level constant value. Emit a + // diagnostic instead. 
+ if (index == -1) { + Info.FFDiag( + E, diag::err_shufflevector_minus_one_is_undefined_behavior_constexpr) + << EltNum; + return false; + } + + if (index < 0 || + index >= TotalElementsInInputVector1 + TotalElementsInInputVector2) + llvm_unreachable("Out of bounds shuffle index"); + + if (index >= TotalElementsInInputVector1) + Result = VecVal2.getVectorElt(index - TotalElementsInInputVector1); + else + Result = VecVal1.getVectorElt(index); + return true; +} + +bool VectorExprEvaluator::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) { + APValue VecVal1; + const Expr *Vec1 = E->getExpr(0); + if (!EvaluateAsRValue(Info, Vec1, VecVal1)) + return false; + APValue VecVal2; + const Expr *Vec2 = E->getExpr(1); + if (!EvaluateAsRValue(Info, Vec2, VecVal2)) + return false; + + VectorType const *DestVecTy = E->getType()->castAs<VectorType>(); + QualType DestElTy = DestVecTy->getElementType(); + + auto TotalElementsInOutputVector = DestVecTy->getNumElements(); + + SmallVector<APValue, 4> ResultElements; + ResultElements.reserve(TotalElementsInOutputVector); + for (unsigned EltNum = 0; EltNum < TotalElementsInOutputVector; ++EltNum) { + APValue Elt; + if (!handleVectorShuffle(Info, E, DestElTy, VecVal1, VecVal2, EltNum, Elt)) + return false; + ResultElements.push_back(std::move(Elt)); + } + + return Success(APValue(ResultElements.data(), ResultElements.size()), E); +} + //===----------------------------------------------------------------------===// // Array Evaluation //===----------------------------------------------------------------------===// @@ -10398,8 +11144,7 @@ namespace { return Error(E); } - Result = APValue(APValue::UninitArray(), 0, - CAT->getSize().getZExtValue()); + Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize()); if (!Result.hasArrayFiller()) return true; @@ -10425,6 +11170,11 @@ namespace { expandStringLiteral(Info, E, Result, AllocType); return true; } + bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E); + bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit, + ArrayRef<Expr *> Args, + const Expr *ArrayFiller, + QualType AllocType = QualType()); }; } // end anonymous namespace @@ -10468,6 +11218,11 @@ static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) { if (MaybeElementDependentArrayFiller(ILE->getInit(I))) return true; } + + if (ILE->hasArrayFiller() && + MaybeElementDependentArrayFiller(ILE->getArrayFiller())) + return true; + return false; } return true; @@ -10483,13 +11238,27 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E, // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...] // an appropriately-typed string literal enclosed in braces. if (E->isStringLiteralInit()) { - auto *SL = dyn_cast<StringLiteral>(E->getInit(0)->IgnoreParens()); + auto *SL = dyn_cast<StringLiteral>(E->getInit(0)->IgnoreParenImpCasts()); // FIXME: Support ObjCEncodeExpr here once we support it in // ArrayExprEvaluator generally. if (!SL) return Error(E); return VisitStringLiteral(SL, AllocType); } + // Any other transparent list init will need proper handling of the + // AllocType; we can't just recurse to the inner initializer. 
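The vector paths added above (element-wise unary operators, __builtin_convertvector, and __builtin_shufflevector) can be exercised with a sketch like the following; the typedef names are invented and a Clang containing these changes is assumed:

typedef int   v4i __attribute__((vector_size(16)));
typedef float v4f __attribute__((vector_size(16)));
constexpr v4i A   = {1, -2, 3, -4};
constexpr v4i Neg = -A;                                 // handleVectorUnaryOperator
constexpr v4f Src = {1.5f, 2.5f, 3.5f, 4.5f};
constexpr v4i Cvt = __builtin_convertvector(Src, v4i);  // element-wise float -> int
constexpr v4i Mix = __builtin_shufflevector(A, Neg, 0, 4, 1, 5);
// A mask element of -1 ("undef") is rejected with the new shufflevector diagnostic.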
+ assert(!E->isTransparent() && + "transparent array list initialization is not string literal init?"); + + return VisitCXXParenListOrInitListExpr(E, E->inits(), E->getArrayFiller(), + AllocType); +} + +bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr( + const Expr *ExprToVisit, ArrayRef<Expr *> Args, const Expr *ArrayFiller, + QualType AllocType) { + const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType( + AllocType.isNull() ? ExprToVisit->getType() : AllocType); bool Success = true; @@ -10499,14 +11268,22 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E, if (Result.isArray() && Result.hasArrayFiller()) Filler = Result.getArrayFiller(); - unsigned NumEltsToInit = E->getNumInits(); - unsigned NumElts = CAT->getSize().getZExtValue(); - const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : nullptr; + unsigned NumEltsToInit = Args.size(); + unsigned NumElts = CAT->getZExtSize(); // If the initializer might depend on the array index, run it for each // array element. - if (NumEltsToInit != NumElts && MaybeElementDependentArrayFiller(FillerExpr)) + if (NumEltsToInit != NumElts && + MaybeElementDependentArrayFiller(ArrayFiller)) { NumEltsToInit = NumElts; + } else { + for (auto *Init : Args) { + if (auto *EmbedS = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) + NumEltsToInit += EmbedS->getDataElementCount() - 1; + } + if (NumEltsToInit > NumElts) + NumEltsToInit = NumElts; + } LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: " << NumEltsToInit << ".\n"); @@ -10523,18 +11300,50 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E, } LValue Subobject = This; - Subobject.addArray(Info, E, CAT); - for (unsigned Index = 0; Index != NumEltsToInit; ++Index) { - const Expr *Init = - Index < E->getNumInits() ? E->getInit(Index) : FillerExpr; - if (!EvaluateInPlace(Result.getArrayInitializedElt(Index), - Info, Subobject, Init) || + Subobject.addArray(Info, ExprToVisit, CAT); + auto Eval = [&](const Expr *Init, unsigned ArrayIndex) { + if (!EvaluateInPlace(Result.getArrayInitializedElt(ArrayIndex), Info, + Subobject, Init) || !HandleLValueArrayAdjustment(Info, Init, Subobject, CAT->getElementType(), 1)) { if (!Info.noteFailure()) return false; Success = false; } + return true; + }; + unsigned ArrayIndex = 0; + QualType DestTy = CAT->getElementType(); + APSInt Value(Info.Ctx.getTypeSize(DestTy), DestTy->isUnsignedIntegerType()); + for (unsigned Index = 0; Index != NumEltsToInit; ++Index) { + const Expr *Init = Index < Args.size() ? 
Args[Index] : ArrayFiller; + if (ArrayIndex >= NumEltsToInit) + break; + if (auto *EmbedS = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) { + StringLiteral *SL = EmbedS->getDataStringLiteral(); + for (unsigned I = EmbedS->getStartingElementPos(), + N = EmbedS->getDataElementCount(); + I != EmbedS->getStartingElementPos() + N; ++I) { + Value = SL->getCodeUnit(I); + if (DestTy->isIntegerType()) { + Result.getArrayInitializedElt(ArrayIndex) = APValue(Value); + } else { + assert(DestTy->isFloatingType() && "unexpected type"); + const FPOptions FPO = + Init->getFPFeaturesInEffect(Info.Ctx.getLangOpts()); + APFloat FValue(0.0); + if (!HandleIntToFloatCast(Info, Init, FPO, EmbedS->getType(), Value, + DestTy, FValue)) + return false; + Result.getArrayInitializedElt(ArrayIndex) = APValue(FValue); + } + ArrayIndex++; + } + } else { + if (!Eval(Init, ArrayIndex)) + return false; + ++ArrayIndex; + } } if (!Result.hasArrayFiller()) @@ -10542,9 +11351,10 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E, // If we get here, we have a trivial filler, which we can just evaluate // once and splat over the rest of the array elements. - assert(FillerExpr && "no array filler for incomplete init list"); + assert(ArrayFiller && "no array filler for incomplete init list"); return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, - FillerExpr) && Success; + ArrayFiller) && + Success; } bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { @@ -10559,7 +11369,7 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { auto *CAT = cast<ConstantArrayType>(E->getType()->castAsArrayTypeUnsafe()); - uint64_t Elements = CAT->getSize().getZExtValue(); + uint64_t Elements = CAT->getZExtSize(); Result = APValue(APValue::UninitArray(), Elements, Elements); LValue Subobject = This; @@ -10567,6 +11377,16 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { bool Success = true; for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) { + // C++ [class.temporary]/5 + // There are four contexts in which temporaries are destroyed at a different + // point than the end of the full-expression. [...] The second context is + // when a copy constructor is called to copy an element of an array while + // the entire array is copied [...]. In either case, if the constructor has + // one or more default arguments, the destruction of every temporary created + // in a default argument is sequenced before the construction of the next + // array element, if any. + FullExpressionRAII Scope(Info); + if (!EvaluateInPlace(Result.getArrayInitializedElt(Index), Info, Subobject, E->getSubExpr()) || !HandleLValueArrayAdjustment(Info, E, Subobject, @@ -10575,6 +11395,9 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { return false; Success = false; } + + // Make sure we run the destructors too. + Scope.destroy(); } return Success; @@ -10591,28 +11414,65 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, bool HadZeroInit = Value->hasValue(); if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) { - unsigned N = CAT->getSize().getZExtValue(); + unsigned FinalSize = CAT->getZExtSize(); // Preserve the array filler if we had prior zero-initialization. APValue Filler = HadZeroInit && Value->hasArrayFiller() ? 
Value->getArrayFiller() : APValue(); - *Value = APValue(APValue::UninitArray(), N, N); - - if (HadZeroInit) - for (unsigned I = 0; I != N; ++I) - Value->getArrayInitializedElt(I) = Filler; + *Value = APValue(APValue::UninitArray(), 0, FinalSize); + if (FinalSize == 0) + return true; - // Initialize the elements. + bool HasTrivialConstructor = CheckTrivialDefaultConstructor( + Info, E->getExprLoc(), E->getConstructor(), + E->requiresZeroInitialization()); LValue ArrayElt = Subobject; ArrayElt.addArray(Info, E, CAT); - for (unsigned I = 0; I != N; ++I) - if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I), - CAT->getElementType()) || - !HandleLValueArrayAdjustment(Info, E, ArrayElt, - CAT->getElementType(), 1)) - return false; + // We do the whole initialization in two passes, first for just one element, + // then for the whole array. It's possible we may find out we can't do const + // init in the first pass, in which case we avoid allocating a potentially + // large array. We don't do more passes because expanding array requires + // copying the data, which is wasteful. + for (const unsigned N : {1u, FinalSize}) { + unsigned OldElts = Value->getArrayInitializedElts(); + if (OldElts == N) + break; + + // Expand the array to appropriate size. + APValue NewValue(APValue::UninitArray(), N, FinalSize); + for (unsigned I = 0; I < OldElts; ++I) + NewValue.getArrayInitializedElt(I).swap( + Value->getArrayInitializedElt(I)); + Value->swap(NewValue); + + if (HadZeroInit) + for (unsigned I = OldElts; I < N; ++I) + Value->getArrayInitializedElt(I) = Filler; + + if (HasTrivialConstructor && N == FinalSize && FinalSize != 1) { + // If we have a trivial constructor, only evaluate it once and copy + // the result into all the array elements. + APValue &FirstResult = Value->getArrayInitializedElt(0); + for (unsigned I = OldElts; I < FinalSize; ++I) + Value->getArrayInitializedElt(I) = FirstResult; + } else { + for (unsigned I = OldElts; I < N; ++I) { + if (!VisitCXXConstructExpr(E, ArrayElt, + &Value->getArrayInitializedElt(I), + CAT->getElementType()) || + !HandleLValueArrayAdjustment(Info, E, ArrayElt, + CAT->getElementType(), 1)) + return false; + // When checking for const initilization any diagnostic is considered + // an error. + if (Info.EvalStatus.Diag && !Info.EvalStatus.Diag->empty() && + !Info.keepEvaluatingAfterFailure()) + return false; + } + } + } return true; } @@ -10624,6 +11484,15 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E, .VisitCXXConstructExpr(E, Type); } +bool ArrayExprEvaluator::VisitCXXParenListInitExpr( + const CXXParenListInitExpr *E) { + assert(E->getType()->isConstantArrayType() && + "Expression result is not a constant array type"); + + return VisitCXXParenListOrInitListExpr(E, E->getInitExprs(), + E->getArrayFiller()); +} + //===----------------------------------------------------------------------===// // Integer Evaluation // @@ -10804,6 +11673,10 @@ class FixedPointExprEvaluator return true; } + bool ZeroInitialization(const Expr *E) { + return Success(0, E); + } + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -10913,44 +11786,13 @@ bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) { return false; } -/// Values returned by __builtin_classify_type, chosen to match the values -/// produced by GCC's builtin. 
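Relating to the EmbedExpr handling in the array evaluator above: a #embed initializer can now populate a constant array element by element. A sketch assuming the C23/Clang #embed extension; "data.bin" is only a placeholder file name:

constexpr unsigned char Blob[] = {
#embed "data.bin"
};
static_assert(sizeof(Blob) > 0, "assumes the embedded file is non-empty");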
-enum class GCCTypeClass { - None = -1, - Void = 0, - Integer = 1, - // GCC reserves 2 for character types, but instead classifies them as - // integers. - Enum = 3, - Bool = 4, - Pointer = 5, - // GCC reserves 6 for references, but appears to never use it (because - // expressions never have reference type, presumably). - PointerToDataMember = 7, - RealFloat = 8, - Complex = 9, - // GCC reserves 10 for functions, but does not use it since GCC version 6 due - // to decay to pointer. (Prior to version 6 it was only used in C++ mode). - // GCC claims to reserve 11 for pointers to member functions, but *actually* - // uses 12 for that purpose, same as for a class or struct. Maybe it - // internally implements a pointer to member as a struct? Who knows. - PointerToMemberFunction = 12, // Not a bug, see above. - ClassOrStruct = 12, - Union = 13, - // GCC reserves 14 for arrays, but does not use it since GCC version 6 due to - // decay to pointer. (Prior to version 6 it was only used in C++ mode). - // GCC reserves 15 for strings, but actually uses 5 (pointer) for string - // literals. -}; - /// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way /// as GCC. -static GCCTypeClass -EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) { +GCCTypeClass EvaluateBuiltinClassifyType(QualType T, + const LangOptions &LangOpts) { assert(!T->isDependentType() && "unexpected dependent type"); QualType CanTy = T.getCanonicalType(); - const BuiltinType *BT = dyn_cast<BuiltinType>(CanTy); switch (CanTy->getTypeClass()) { #define TYPE(ID, BASE) @@ -10963,7 +11805,7 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) { llvm_unreachable("unexpected non-canonical or dependent type"); case Type::Builtin: - switch (BT->getKind()) { + switch (cast<BuiltinType>(CanTy)->getKind()) { #define BUILTIN_TYPE(ID, SINGLETON_ID) #define SIGNED_TYPE(ID, SINGLETON_ID) \ case BuiltinType::ID: return GCCTypeClass::Integer; @@ -11029,6 +11871,10 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) { #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" return GCCTypeClass::None; case BuiltinType::Dependent: @@ -11045,6 +11891,7 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) { case Type::IncompleteArray: case Type::FunctionNoProto: case Type::FunctionProto: + case Type::ArrayParameter: return GCCTypeClass::Pointer; case Type::MemberPointer: @@ -11064,19 +11911,23 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) { return EvaluateBuiltinClassifyType( CanTy->castAs<AtomicType>()->getValueType(), LangOpts); - case Type::BlockPointer: case Type::Vector: case Type::ExtVector: + return GCCTypeClass::Vector; + + case Type::BlockPointer: case Type::ConstantMatrix: case Type::ObjCObject: case Type::ObjCInterface: case Type::ObjCObjectPointer: case Type::Pipe: - case Type::ExtInt: - // GCC classifies vectors as None. We follow its lead and classify all - // other types that don't fit into the regular classification the same way. + // Classify all other types that don't fit into the regular + // classification the same way. 
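For reference, the GCCTypeClass values listed above are what __builtin_classify_type reports, and the builtin is itself an integer constant expression. A small sketch using the values from that table:

static_assert(__builtin_classify_type(1)     == 1, "integer");
static_assert(__builtin_classify_type(1.0)   == 8, "real floating");
static_assert(__builtin_classify_type("str") == 5, "string literals decay to pointer");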
return GCCTypeClass::None; + case Type::BitInt: + return GCCTypeClass::BitInt; + case Type::LValueReference: case Type::RValueReference: llvm_unreachable("invalid type for expression"); @@ -11200,8 +12051,8 @@ static QualType getObjectType(APValue::LValueBase B) { static const Expr *ignorePointerCastsAndParens(const Expr *E) { assert(E->isPRValue() && E->getType()->hasPointerRepresentation()); - auto *NoParens = E->IgnoreParens(); - auto *Cast = dyn_cast<CastExpr>(NoParens); + const Expr *NoParens = E->IgnoreParens(); + const auto *Cast = dyn_cast<CastExpr>(NoParens); if (Cast == nullptr) return NoParens; @@ -11212,7 +12063,7 @@ static const Expr *ignorePointerCastsAndParens(const Expr *E) { CastKind != CK_AddressSpaceConversion) return NoParens; - auto *SubExpr = Cast->getSubExpr(); + const auto *SubExpr = Cast->getSubExpr(); if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isPRValue()) return NoParens; return ignorePointerCastsAndParens(SubExpr); @@ -11281,7 +12132,7 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) { return true; const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType)); uint64_t Index = Entry.getAsArrayIndex(); - if (Index + 1 != CAT->getSize()) + if (Index + 1 != CAT->getZExtSize()) return false; BaseType = CAT->getElementType(); } else if (BaseType->isAnyComplexType()) { @@ -11338,9 +12189,31 @@ static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) { // conservative with the last element in structs (if it's an array), so our // current behavior is more compatible than an explicit list approach would // be. + auto isFlexibleArrayMember = [&] { + using FAMKind = LangOptions::StrictFlexArraysLevelKind; + FAMKind StrictFlexArraysLevel = + Ctx.getLangOpts().getStrictFlexArraysLevel(); + + if (Designator.isMostDerivedAnUnsizedArray()) + return true; + + if (StrictFlexArraysLevel == FAMKind::Default) + return true; + + if (Designator.getMostDerivedArraySize() == 0 && + StrictFlexArraysLevel != FAMKind::IncompleteOnly) + return true; + + if (Designator.getMostDerivedArraySize() == 1 && + StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete) + return true; + + return false; + }; + return LVal.InvalidBase && Designator.Entries.size() == Designator.MostDerivedPathLength && - Designator.MostDerivedIsArrayElement && + Designator.MostDerivedIsArrayElement && isFlexibleArrayMember() && isDesignatorAtObjectEnd(Ctx, LVal); } @@ -11355,6 +12228,18 @@ static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int, return true; } +/// If we're evaluating the object size of an instance of a struct that +/// contains a flexible array member, add the size of the initializer. 
+static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T, + const LValue &LV, CharUnits &Size) { + if (!T.isNull() && T->isStructureType() && + T->getAsStructureType()->getDecl()->hasFlexibleArrayMember()) + if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>()) + if (const auto *VD = dyn_cast<VarDecl>(V)) + if (VD->hasInit()) + Size += VD->getFlexibleArrayInitChars(Info.Ctx); +} + /// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will /// determine how many bytes exist from the beginning of the object to either /// the end of the current subobject, or the end of the object itself, depending @@ -11389,7 +12274,9 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc, return false; QualType BaseTy = getObjectType(LVal.getLValueBase()); - return CheckedHandleSizeof(BaseTy, EndOffset); + const bool Ret = CheckedHandleSizeof(BaseTy, EndOffset); + addFlexibleArrayMemberInitSize(Info, BaseTy, LVal, EndOffset); + return Ret; } // We want to evaluate the size of a subobject. @@ -11487,10 +12374,9 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type, } bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) { - if (unsigned BuiltinOp = E->getBuiltinCallee()) - return VisitBuiltinCallExpr(E, BuiltinOp); - - return ExprEvaluatorBaseTy::VisitCallExpr(E); + if (!IsConstantEvaluatedBuiltinCall(E)) + return ExprEvaluatorBaseTy::VisitCallExpr(E); + return VisitBuiltinCallExpr(E, E->getBuiltinCallee()); } static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info, @@ -11524,7 +12410,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp) { switch (BuiltinOp) { default: - return ExprEvaluatorBaseTy::VisitCallExpr(E); + return false; case Builtin::BI__builtin_dynamic_object_size: case Builtin::BI__builtin_object_size: { @@ -11653,20 +12539,45 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, if (!EvaluateInteger(E->getArg(0), Val, Info)) return false; - return Success(Val.getBitWidth() - Val.getMinSignedBits(), E); + return Success(Val.getBitWidth() - Val.getSignificantBits(), E); } case Builtin::BI__builtin_clz: case Builtin::BI__builtin_clzl: case Builtin::BI__builtin_clzll: - case Builtin::BI__builtin_clzs: { + case Builtin::BI__builtin_clzs: + case Builtin::BI__builtin_clzg: + case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes + case Builtin::BI__lzcnt: + case Builtin::BI__lzcnt64: { APSInt Val; if (!EvaluateInteger(E->getArg(0), Val, Info)) return false; - if (!Val) - return Error(E); - return Success(Val.countLeadingZeros(), E); + std::optional<APSInt> Fallback; + if (BuiltinOp == Builtin::BI__builtin_clzg && E->getNumArgs() > 1) { + APSInt FallbackTemp; + if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info)) + return false; + Fallback = FallbackTemp; + } + + if (!Val) { + if (Fallback) + return Success(*Fallback, E); + + // When the argument is 0, the result of GCC builtins is undefined, + // whereas for Microsoft intrinsics, the result is the bit-width of the + // argument. 
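addFlexibleArrayMemberInitSize above feeds into __builtin_object_size, which already folds during constant evaluation. A small sketch of the ordinary (non-flexible-array) case; Buf is an invented type:

struct Buf { char A[8]; char B[4]; };
constexpr Buf Obj{};
static_assert(__builtin_object_size(&Obj, 0)  == sizeof(Obj),   "whole object");
static_assert(__builtin_object_size(Obj.A, 1) == sizeof(Obj.A), "closest subobject");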
+ bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 && + BuiltinOp != Builtin::BI__lzcnt && + BuiltinOp != Builtin::BI__lzcnt64; + + if (ZeroIsUndefined) + return Error(E); + } + + return Success(Val.countl_zero(), E); } case Builtin::BI__builtin_constant_p: { @@ -11692,8 +12603,9 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, Callee->getIdentifier()->isStr("is_constant_evaluated")))) { // FIXME: Find a better way to avoid duplicated diagnostics. if (Info.EvalStatus.Diag) - Info.report((Info.CallStackDepth == 1) ? E->getExprLoc() - : Info.CurrentCall->CallLoc, + Info.report((Info.CallStackDepth == 1) + ? E->getExprLoc() + : Info.CurrentCall->getCallRange().getBegin(), diag::warn_is_constant_evaluated_always_true_constexpr) << (Info.CallStackDepth == 1 ? "__builtin_is_constant_evaluated" : "std::is_constant_evaluated"); @@ -11705,14 +12617,28 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, case Builtin::BI__builtin_ctz: case Builtin::BI__builtin_ctzl: case Builtin::BI__builtin_ctzll: - case Builtin::BI__builtin_ctzs: { + case Builtin::BI__builtin_ctzs: + case Builtin::BI__builtin_ctzg: { APSInt Val; if (!EvaluateInteger(E->getArg(0), Val, Info)) return false; - if (!Val) + + std::optional<APSInt> Fallback; + if (BuiltinOp == Builtin::BI__builtin_ctzg && E->getNumArgs() > 1) { + APSInt FallbackTemp; + if (!EvaluateInteger(E->getArg(1), FallbackTemp, Info)) + return false; + Fallback = FallbackTemp; + } + + if (!Val) { + if (Fallback) + return Success(*Fallback, E); + return Error(E); + } - return Success(Val.countTrailingZeros(), E); + return Success(Val.countr_zero(), E); } case Builtin::BI__builtin_eh_return_data_regno: { @@ -11725,6 +12651,13 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, case Builtin::BI__builtin_expect_with_probability: return Visit(E->getArg(0)); + case Builtin::BI__builtin_ptrauth_string_discriminator: { + const auto *Literal = + cast<StringLiteral>(E->getArg(0)->IgnoreParenImpCasts()); + uint64_t Result = getPointerAuthStableSipHash(Literal->getString()); + return Success(Result, E); + } + case Builtin::BI__builtin_ffs: case Builtin::BI__builtin_ffsl: case Builtin::BI__builtin_ffsll: { @@ -11732,7 +12665,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, if (!EvaluateInteger(E->getArg(0), Val, Info)) return false; - unsigned N = Val.countTrailingZeros(); + unsigned N = Val.countr_zero(); return Success(N == Val.getBitWidth() ? 0 : N + 1, E); } @@ -11780,6 +12713,34 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, Success(Val.isNormal() ? 1 : 0, E); } + case Builtin::BI__builtin_issubnormal: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isDenormal() ? 1 : 0, E); + } + + case Builtin::BI__builtin_iszero: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isZero() ? 1 : 0, E); + } + + case Builtin::BI__builtin_issignaling: { + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success(Val.isSignaling() ? 1 : 0, E); + } + + case Builtin::BI__builtin_isfpclass: { + APSInt MaskVal; + if (!EvaluateInteger(E->getArg(1), MaskVal, Info)) + return false; + unsigned Test = static_cast<llvm::FPClassTest>(MaskVal.getZExtValue()); + APFloat Val(0.0); + return EvaluateFloat(E->getArg(0), Val, Info) && + Success((Val.classify() & Test) ? 
1 : 0, E); + } + case Builtin::BI__builtin_parity: case Builtin::BI__builtin_parityl: case Builtin::BI__builtin_parityll: { @@ -11787,17 +12748,21 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, if (!EvaluateInteger(E->getArg(0), Val, Info)) return false; - return Success(Val.countPopulation() % 2, E); + return Success(Val.popcount() % 2, E); } case Builtin::BI__builtin_popcount: case Builtin::BI__builtin_popcountl: - case Builtin::BI__builtin_popcountll: { + case Builtin::BI__builtin_popcountll: + case Builtin::BI__builtin_popcountg: + case Builtin::BI__popcnt16: // Microsoft variants of popcount + case Builtin::BI__popcnt: + case Builtin::BI__popcnt64: { APSInt Val; if (!EvaluateInteger(E->getArg(0), Val, Info)) return false; - return Success(Val.countPopulation(), E); + return Success(Val.popcount(), E); } case Builtin::BI__builtin_rotateleft8: @@ -11839,55 +12804,19 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, // A call to strlen is not a constant expression. if (Info.getLangOpts().CPlusPlus11) Info.CCEDiag(E, diag::note_constexpr_invalid_function) - << /*isConstexpr*/0 << /*isConstructor*/0 - << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'"); + << /*isConstexpr*/ 0 << /*isConstructor*/ 0 + << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str(); else Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr); - LLVM_FALLTHROUGH; + [[fallthrough]]; case Builtin::BI__builtin_strlen: case Builtin::BI__builtin_wcslen: { // As an extension, we support __builtin_strlen() as a constant expression, // and support folding strlen() to a constant. - LValue String; - if (!EvaluatePointer(E->getArg(0), String, Info)) - return false; - - QualType CharTy = E->getArg(0)->getType()->getPointeeType(); - - // Fast path: if it's a string literal, search the string value. - if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>( - String.getLValueBase().dyn_cast<const Expr *>())) { - // The string literal may have embedded null characters. Find the first - // one and truncate there. - StringRef Str = S->getBytes(); - int64_t Off = String.Offset.getQuantity(); - if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() && - S->getCharByteWidth() == 1 && - // FIXME: Add fast-path for wchar_t too. - Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) { - Str = Str.substr(Off); - - StringRef::size_type Pos = Str.find(0); - if (Pos != StringRef::npos) - Str = Str.substr(0, Pos); - - return Success(Str.size(), E); - } - - // Fall through to slow path to issue appropriate diagnostic. - } - - // Slow path: scan the bytes of the string looking for the terminating 0. - for (uint64_t Strlen = 0; /**/; ++Strlen) { - APValue Char; - if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) || - !Char.isInt()) - return false; - if (!Char.getInt()) - return Success(Strlen, E); - if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1)) - return false; - } + uint64_t StrLen; + if (EvaluateBuiltinStrLen(E->getArg(0), StrLen, Info)) + return Success(StrLen, E); + return false; } case Builtin::BIstrcmp: @@ -11900,11 +12829,11 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, // A call to strlen is not a constant expression. 
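The type-generic bit builtins accepted above (__builtin_clzg, __builtin_ctzg, __builtin_popcountg) fold in constant expressions, including the optional second argument used as the result when the input is zero. A sketch assuming a 32-bit unsigned int:

static_assert(__builtin_clzg(1u) == 31,     "31 leading zeros in a 32-bit 1");
static_assert(__builtin_clzg(0u, -1) == -1, "fallback value returned for zero");
static_assert(__builtin_ctzg(8u) == 3, "");
static_assert(__builtin_popcountg(0xF0u) == 4, "");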
if (Info.getLangOpts().CPlusPlus11) Info.CCEDiag(E, diag::note_constexpr_invalid_function) - << /*isConstexpr*/0 << /*isConstructor*/0 - << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'"); + << /*isConstexpr*/ 0 << /*isConstructor*/ 0 + << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str(); else Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr); - LLVM_FALLTHROUGH; + [[fallthrough]]; case Builtin::BI__builtin_strcmp: case Builtin::BI__builtin_wcscmp: case Builtin::BI__builtin_strncmp: @@ -11925,7 +12854,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, APSInt N; if (!EvaluateInteger(E->getArg(2), N, Info)) return false; - MaxLength = N.getExtValue(); + MaxLength = N.getZExtValue(); } // Empty substrings compare equal by definition. @@ -11956,7 +12885,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, !(isOneByteCharacterType(CharTy1) && isOneByteCharacterType(CharTy2))) { // FIXME: Consider using our bit_cast implementation to support this. Info.FFDiag(E, diag::note_constexpr_memcmp_unsupported) - << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'") + << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str() << CharTy1 << CharTy2; return false; } @@ -12028,19 +12957,35 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth(); if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) { if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free || - Size == CharUnits::One() || - E->getArg(1)->isNullPointerConstant(Info.Ctx, - Expr::NPC_NeverValueDependent)) - // OK, we will inline appropriately-aligned operations of this size, - // and _Atomic(T) is appropriately-aligned. + Size == CharUnits::One()) return Success(1, E); - QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()-> - castAs<PointerType>()->getPointeeType(); - if (!PointeeType->isIncompleteType() && - Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) { - // OK, we will inline operations on this object. + // If the pointer argument can be evaluated to a compile-time constant + // integer (or nullptr), check if that value is appropriately aligned. + const Expr *PtrArg = E->getArg(1); + Expr::EvalResult ExprResult; + APSInt IntResult; + if (PtrArg->EvaluateAsRValue(ExprResult, Info.Ctx) && + ExprResult.Val.toIntegralConstant(IntResult, PtrArg->getType(), + Info.Ctx) && + IntResult.isAligned(Size.getAsAlign())) return Success(1, E); + + // Otherwise, check if the type's alignment against Size. + if (auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) { + // Drop the potential implicit-cast to 'const volatile void*', getting + // the underlying type. + if (ICE->getCastKind() == CK_BitCast) + PtrArg = ICE->getSubExpr(); + } + + if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) { + QualType PointeeType = PtrTy->getPointeeType(); + if (!PointeeType->isIncompleteType() && + Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) { + // OK, we will inline operations on this object. + return Success(1, E); + } } } } @@ -12048,6 +12993,59 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, return BuiltinOp == Builtin::BI__atomic_always_lock_free ? 
Success(0, E) : Error(E); } + case Builtin::BI__builtin_addcb: + case Builtin::BI__builtin_addcs: + case Builtin::BI__builtin_addc: + case Builtin::BI__builtin_addcl: + case Builtin::BI__builtin_addcll: + case Builtin::BI__builtin_subcb: + case Builtin::BI__builtin_subcs: + case Builtin::BI__builtin_subc: + case Builtin::BI__builtin_subcl: + case Builtin::BI__builtin_subcll: { + LValue CarryOutLValue; + APSInt LHS, RHS, CarryIn, CarryOut, Result; + QualType ResultType = E->getArg(0)->getType(); + if (!EvaluateInteger(E->getArg(0), LHS, Info) || + !EvaluateInteger(E->getArg(1), RHS, Info) || + !EvaluateInteger(E->getArg(2), CarryIn, Info) || + !EvaluatePointer(E->getArg(3), CarryOutLValue, Info)) + return false; + // Copy the number of bits and sign. + Result = LHS; + CarryOut = LHS; + + bool FirstOverflowed = false; + bool SecondOverflowed = false; + switch (BuiltinOp) { + default: + llvm_unreachable("Invalid value for BuiltinOp"); + case Builtin::BI__builtin_addcb: + case Builtin::BI__builtin_addcs: + case Builtin::BI__builtin_addc: + case Builtin::BI__builtin_addcl: + case Builtin::BI__builtin_addcll: + Result = + LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed); + break; + case Builtin::BI__builtin_subcb: + case Builtin::BI__builtin_subcs: + case Builtin::BI__builtin_subc: + case Builtin::BI__builtin_subcl: + case Builtin::BI__builtin_subcll: + Result = + LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed); + break; + } + + // It is possible for both overflows to happen but CGBuiltin uses an OR so + // this is consistent. + CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed); + APValue APV{CarryOut}; + if (!handleAssignment(Info, E, CarryOutLValue, ResultType, APV)) + return false; + return Success(Result, E); + } case Builtin::BI__builtin_add_overflow: case Builtin::BI__builtin_sub_overflow: case Builtin::BI__builtin_mul_overflow: @@ -12189,6 +13187,10 @@ static bool isOnePastTheEndOfCompleteObject(const ASTContext &Ctx, if (Ty->isIncompleteType()) return true; + // Can't be past the end of an invalid object. + if (LV.getLValueDesignator().Invalid) + return false; + // We're a past-the-end pointer if we point to the byte after the object, // no matter what our type or path is. auto Size = Ctx.getTypeSizeInChars(Ty); @@ -12205,9 +13207,9 @@ namespace { class DataRecursiveIntBinOpEvaluator { struct EvalResult { APValue Val; - bool Failed; + bool Failed = false; - EvalResult() : Failed(false) { } + EvalResult() = default; void swap(EvalResult &RHS) { Val.swap(RHS.Val); @@ -12658,41 +13660,55 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E, // Reject differing bases from the normal codepath; we special-case // comparisons to null. if (!HasSameBase(LHSValue, RHSValue)) { + auto DiagComparison = [&] (unsigned DiagID, bool Reversed = false) { + std::string LHS = LHSValue.toString(Info.Ctx, E->getLHS()->getType()); + std::string RHS = RHSValue.toString(Info.Ctx, E->getRHS()->getType()); + Info.FFDiag(E, DiagID) + << (Reversed ? RHS : LHS) << (Reversed ? LHS : RHS); + return false; + }; // Inequalities and subtractions between unrelated pointers have // unspecified or undefined behavior. - if (!IsEquality) { - Info.FFDiag(E, diag::note_constexpr_pointer_comparison_unspecified); - return false; - } + if (!IsEquality) + return DiagComparison( + diag::note_constexpr_pointer_comparison_unspecified); // A constant address may compare equal to the address of a symbol. 
// The one exception is that address of an object cannot compare equal // to a null pointer constant. + // TODO: Should we restrict this to actual null pointers, and exclude the + // case of zero cast to pointer type? if ((!LHSValue.Base && !LHSValue.Offset.isZero()) || (!RHSValue.Base && !RHSValue.Offset.isZero())) - return Error(E); + return DiagComparison(diag::note_constexpr_pointer_constant_comparison, + !RHSValue.Base); // It's implementation-defined whether distinct literals will have // distinct addresses. In clang, the result of such a comparison is // unspecified, so it is not a constant expression. However, we do know // that the address of a literal will be non-null. if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) && LHSValue.Base && RHSValue.Base) - return Error(E); + return DiagComparison(diag::note_constexpr_literal_comparison); // We can't tell whether weak symbols will end up pointing to the same // object. if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue)) - return Error(E); + return DiagComparison(diag::note_constexpr_pointer_weak_comparison, + !IsWeakLValue(LHSValue)); // We can't compare the address of the start of one object with the // past-the-end address of another object, per C++ DR1652. - if ((LHSValue.Base && LHSValue.Offset.isZero() && - isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) || - (RHSValue.Base && RHSValue.Offset.isZero() && - isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue))) - return Error(E); + if (LHSValue.Base && LHSValue.Offset.isZero() && + isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) + return DiagComparison(diag::note_constexpr_pointer_comparison_past_end, + true); + if (RHSValue.Base && RHSValue.Offset.isZero() && + isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)) + return DiagComparison(diag::note_constexpr_pointer_comparison_past_end, + false); // We can't tell whether an object is at the same address as another // zero sized object. if ((RHSValue.Base && isZeroSized(LHSValue)) || (LHSValue.Base && isZeroSized(RHSValue))) - return Error(E); + return DiagComparison( + diag::note_constexpr_pointer_comparison_zero_sized); return Success(CmpResult::Unequal, E); } @@ -12796,6 +13812,19 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E, if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK) return false; + // If either operand is a pointer to a weak function, the comparison is not + // constant. + if (LHSValue.getDecl() && LHSValue.getDecl()->isWeak()) { + Info.FFDiag(E, diag::note_constexpr_mem_pointer_weak_comparison) + << LHSValue.getDecl(); + return false; + } + if (RHSValue.getDecl() && RHSValue.getDecl()->isWeak()) { + Info.FFDiag(E, diag::note_constexpr_mem_pointer_weak_comparison) + << RHSValue.getDecl(); + return false; + } + // C++11 [expr.eq]p2: // If both operands are null, they compare equal. Otherwise if only one is // null, they compare unequal. @@ -12827,6 +13856,10 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E, // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t // are compared, the result is true of the operator is <=, >= or ==, and // false otherwise. 
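The pointer-comparison rework above replaces the old generic failure with targeted notes. For instance, comparing two distinct string literals is still not a constant expression, but the note now states the literal-comparison reason; an intentionally ill-formed sketch:

constexpr bool Same = "abc" == "abc";
// not a constant expression; the diagnostic note (note_constexpr_literal_comparison)
// now points at the two literals being compared.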
+ LValue Res; + if (!EvaluatePointer(E->getLHS(), Res, Info) || + !EvaluatePointer(E->getRHS(), Res, Info)) + return false; return Success(CmpResult::Equal, E); } @@ -12873,6 +13906,11 @@ bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) { }); } +bool RecordExprEvaluator::VisitCXXParenListInitExpr( + const CXXParenListInitExpr *E) { + return VisitCXXParenListOrInitListExpr(E, E->getInitExprs()); +} + bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { // We don't support assignment in C. C++ assignments don't get here because // assignment is an lvalue in C++. @@ -13024,6 +14062,12 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( E); } + case UETT_PtrAuthTypeDiscriminator: { + if (E->getArgumentType()->isDependentType()) + return false; + return Success( + Info.Ctx.getPointerAuthTypeDiscriminator(E->getArgumentType()), E); + } case UETT_VecStep: { QualType Ty = E->getTypeOfArgument(); @@ -13040,6 +14084,7 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( return Success(1, E); } + case UETT_DataSizeOf: case UETT_SizeOf: { QualType SrcTy = E->getTypeOfArgument(); // C++ [expr.sizeof]p2: "When applied to a reference or a reference type, @@ -13048,8 +14093,11 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( SrcTy = Ref->getPointeeType(); CharUnits Sizeof; - if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof)) + if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof, + E->getKind() == UETT_DataSizeOf ? SizeOfType::DataSizeOf + : SizeOfType::SizeOf)) { return false; + } return Success(Sizeof, E); } case UETT_OpenMPRequiredSimdAlign: @@ -13059,6 +14107,20 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr( Info.Ctx.getOpenMPDefaultSimdAlign(E->getArgumentType())) .getQuantity(), E); + case UETT_VectorElements: { + QualType Ty = E->getTypeOfArgument(); + // If the vector has a fixed size, we can determine the number of elements + // at compile time. 
+ if (const auto *VT = Ty->getAs<VectorType>()) + return Success(VT->getNumElements(), E); + + assert(Ty->isSizelessVectorType()); + if (Info.InConstantContext) + Info.CCEDiag(E, diag::note_constexpr_non_const_vectorelements) + << E->getSourceRange(); + + return false; + } } llvm_unreachable("unknown expr/type trait"); @@ -13151,10 +14213,18 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { return false; if (!Result.isInt()) return Error(E); const APSInt &Value = Result.getInt(); - if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() && - !HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1), - E->getType())) - return false; + if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) { + if (Info.checkingForUndefinedBehavior()) + Info.Ctx.getDiagnostics().Report(E->getExprLoc(), + diag::warn_integer_constant_overflow) + << toString(Value, 10, Value.isSigned(), /*formatAsCLiteral=*/false, + /*UpperCase=*/true, /*InsertSeparators=*/true) + << E->getType() << E->getSourceRange(); + + if (!HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1), + E->getType())) + return false; + } return Success(-Value, E); } case UO_Not: { @@ -13219,6 +14289,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { case CK_FixedPointCast: case CK_IntegralToFixedPoint: case CK_MatrixCast: + case CK_HLSLVectorTruncation: llvm_unreachable("invalid cast kind for integral value"); case CK_BitCast: @@ -13236,6 +14307,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { case CK_AtomicToNonAtomic: case CK_NoOp: case CK_LValueToRValueBitCast: + case CK_HLSLArrayRValue: return ExprEvaluatorBaseTy::VisitCastExpr(E); case CK_MemberPointerToBoolean: @@ -13291,12 +14363,63 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) { return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType); } + if (Info.Ctx.getLangOpts().CPlusPlus && Info.InConstantContext && + Info.EvalMode == EvalInfo::EM_ConstantExpression && + DestType->isEnumeralType()) { + + bool ConstexprVar = true; + + // If we are here, we know we are in a context that might require a + // constant expression or a constant value. But if we are initializing a + // value, we don't know whether it is a constexpr variable or not. We can + // check the EvaluatingDecl to determine whether it is constexpr; if it is + // not, we don't want to emit a diagnostic. + if (const auto *VD = dyn_cast_or_null<VarDecl>( + Info.EvaluatingDecl.dyn_cast<const ValueDecl *>())) + ConstexprVar = VD->isConstexpr(); + + const EnumType *ET = dyn_cast<EnumType>(DestType.getCanonicalType()); + const EnumDecl *ED = ET->getDecl(); + // Check that the value is within the range of the enumeration values. + // + // This corresponds to [expr.static.cast]p10 which says: + // A value of integral or enumeration type can be explicitly converted + // to a complete enumeration type ... If the enumeration type does not + // have a fixed underlying type, the value is unchanged if the original + // value is within the range of the enumeration values ([dcl.enum]), and + // otherwise, the behavior is undefined. + // + // This was resolved as part of DR2338 which has CD5 status.
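A hedged example (the Pet enumeration is made up, not from the patch) of what the range check that follows diagnoses; the enumeration has no fixed underlying type, so its value range is derived from the enumerators per [dcl.enum]:

    enum Pet { Cat = 0, Dog = 1 };             // value range is [0, 1]
    constexpr Pet P1 = static_cast<Pet>(1);    // in range, fine
    constexpr Pet P2 = static_cast<Pet>(4);    // out of range: diagnosed with
                                               // warn_constexpr_unscoped_enum_out_of_range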
+ if (!ED->isFixed()) { + llvm::APInt Min; + llvm::APInt Max; + + ED->getValueRange(Max, Min); + --Max; + + if (ED->getNumNegativeBits() && ConstexprVar && + (Max.slt(Result.getInt().getSExtValue()) || + Min.sgt(Result.getInt().getSExtValue()))) + Info.Ctx.getDiagnostics().Report( + E->getExprLoc(), diag::warn_constexpr_unscoped_enum_out_of_range) + << llvm::toString(Result.getInt(), 10) << Min.getSExtValue() + << Max.getSExtValue() << ED; + else if (!ED->getNumNegativeBits() && ConstexprVar && + Max.ult(Result.getInt().getZExtValue())) + Info.Ctx.getDiagnostics().Report( + E->getExprLoc(), diag::warn_constexpr_unscoped_enum_out_of_range) + << llvm::toString(Result.getInt(), 10) << Min.getZExtValue() + << Max.getZExtValue() << ED; + } + } + return Success(HandleIntToIntCast(Info, E, DestType, SrcType, Result.getInt()), E); } case CK_PointerToIntegral: { - CCEDiag(E, diag::note_constexpr_invalid_cast) << 2; + CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << Info.Ctx.getLangOpts().CPlusPlus << E->getSourceRange(); LValue LV; if (!EvaluatePointer(SubExpr, LV, Info)) @@ -13650,17 +14773,22 @@ static bool TryEvaluateBuiltinNaN(const ASTContext &Context, } bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) { + if (!IsConstantEvaluatedBuiltinCall(E)) + return ExprEvaluatorBaseTy::VisitCallExpr(E); + switch (E->getBuiltinCallee()) { default: - return ExprEvaluatorBaseTy::VisitCallExpr(E); + return false; case Builtin::BI__builtin_huge_val: case Builtin::BI__builtin_huge_valf: case Builtin::BI__builtin_huge_vall: + case Builtin::BI__builtin_huge_valf16: case Builtin::BI__builtin_huge_valf128: case Builtin::BI__builtin_inf: case Builtin::BI__builtin_inff: case Builtin::BI__builtin_infl: + case Builtin::BI__builtin_inff16: case Builtin::BI__builtin_inff128: { const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType()); @@ -13671,6 +14799,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) { case Builtin::BI__builtin_nans: case Builtin::BI__builtin_nansf: case Builtin::BI__builtin_nansl: + case Builtin::BI__builtin_nansf16: case Builtin::BI__builtin_nansf128: if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0), true, Result)) @@ -13680,6 +14809,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) { case Builtin::BI__builtin_nan: case Builtin::BI__builtin_nanf: case Builtin::BI__builtin_nanl: + case Builtin::BI__builtin_nanf16: case Builtin::BI__builtin_nanf128: // If this is __builtin_nan() turn this into a nan, otherwise we // can't constant fold it. @@ -13722,6 +14852,42 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) { Result.copySign(RHS); return true; } + + case Builtin::BI__builtin_fmax: + case Builtin::BI__builtin_fmaxf: + case Builtin::BI__builtin_fmaxl: + case Builtin::BI__builtin_fmaxf16: + case Builtin::BI__builtin_fmaxf128: { + // TODO: Handle sNaN. + APFloat RHS(0.); + if (!EvaluateFloat(E->getArg(0), Result, Info) || + !EvaluateFloat(E->getArg(1), RHS, Info)) + return false; + // When comparing zeroes, return +0.0 if one of the zeroes is positive. + if (Result.isZero() && RHS.isZero() && Result.isNegative()) + Result = RHS; + else if (Result.isNaN() || RHS > Result) + Result = RHS; + return true; + } + + case Builtin::BI__builtin_fmin: + case Builtin::BI__builtin_fminf: + case Builtin::BI__builtin_fminl: + case Builtin::BI__builtin_fminf16: + case Builtin::BI__builtin_fminf128: { + // TODO: Handle sNaN. 
+ APFloat RHS(0.); + if (!EvaluateFloat(E->getArg(0), Result, Info) || + !EvaluateFloat(E->getArg(1), RHS, Info)) + return false; + // When comparing zeroes, return -0.0 if one of the zeroes is negative. + if (Result.isZero() && RHS.isZero() && RHS.isNegative()) + Result = RHS; + else if (Result.isNaN() || RHS < Result) + Result = RHS; + return true; + } } } @@ -13963,12 +15129,14 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) { case CK_FixedPointToIntegral: case CK_IntegralToFixedPoint: case CK_MatrixCast: + case CK_HLSLVectorTruncation: llvm_unreachable("invalid cast kind for complex value"); case CK_LValueToRValue: case CK_AtomicToNonAtomic: case CK_NoOp: case CK_LValueToRValueBitCast: + case CK_HLSLArrayRValue: return ExprEvaluatorBaseTy::VisitCastExpr(E); case CK_Dependent: @@ -14055,6 +15223,104 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) { llvm_unreachable("unknown cast resulting in complex value"); } +void HandleComplexComplexMul(APFloat A, APFloat B, APFloat C, APFloat D, + APFloat &ResR, APFloat &ResI) { + // This is an implementation of complex multiplication according to the + // constraints laid out in C11 Annex G. The implementation uses the + // following naming scheme: + // (a + ib) * (c + id) + + APFloat AC = A * C; + APFloat BD = B * D; + APFloat AD = A * D; + APFloat BC = B * C; + ResR = AC - BD; + ResI = AD + BC; + if (ResR.isNaN() && ResI.isNaN()) { + bool Recalc = false; + if (A.isInfinity() || B.isInfinity()) { + A = APFloat::copySign(APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), + A); + B = APFloat::copySign(APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), + B); + if (C.isNaN()) + C = APFloat::copySign(APFloat(C.getSemantics()), C); + if (D.isNaN()) + D = APFloat::copySign(APFloat(D.getSemantics()), D); + Recalc = true; + } + if (C.isInfinity() || D.isInfinity()) { + C = APFloat::copySign(APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), + C); + D = APFloat::copySign(APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), + D); + if (A.isNaN()) + A = APFloat::copySign(APFloat(A.getSemantics()), A); + if (B.isNaN()) + B = APFloat::copySign(APFloat(B.getSemantics()), B); + Recalc = true; + } + if (!Recalc && (AC.isInfinity() || BD.isInfinity() || AD.isInfinity() || + BC.isInfinity())) { + if (A.isNaN()) + A = APFloat::copySign(APFloat(A.getSemantics()), A); + if (B.isNaN()) + B = APFloat::copySign(APFloat(B.getSemantics()), B); + if (C.isNaN()) + C = APFloat::copySign(APFloat(C.getSemantics()), C); + if (D.isNaN()) + D = APFloat::copySign(APFloat(D.getSemantics()), D); + Recalc = true; + } + if (Recalc) { + ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D); + ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C); + } + } +} + +void HandleComplexComplexDiv(APFloat A, APFloat B, APFloat C, APFloat D, + APFloat &ResR, APFloat &ResI) { + // This is an implementation of complex division according to the + // constraints laid out in C11 Annex G. 
The implementation uses the + // following naming scheme: + // (a + ib) / (c + id) + + int DenomLogB = 0; + APFloat MaxCD = maxnum(abs(C), abs(D)); + if (MaxCD.isFinite()) { + DenomLogB = ilogb(MaxCD); + C = scalbn(C, -DenomLogB, APFloat::rmNearestTiesToEven); + D = scalbn(D, -DenomLogB, APFloat::rmNearestTiesToEven); + } + APFloat Denom = C * C + D * D; + ResR = + scalbn((A * C + B * D) / Denom, -DenomLogB, APFloat::rmNearestTiesToEven); + ResI = + scalbn((B * C - A * D) / Denom, -DenomLogB, APFloat::rmNearestTiesToEven); + if (ResR.isNaN() && ResI.isNaN()) { + if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) { + ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A; + ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B; + } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() && + D.isFinite()) { + A = APFloat::copySign(APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), + A); + B = APFloat::copySign(APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), + B); + ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D); + ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D); + } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) { + C = APFloat::copySign(APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), + C); + D = APFloat::copySign(APFloat(D.getSemantics(), D.isInfinity() ? 1 : 0), + D); + ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D); + ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D); + } + } +} + bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma) return ExprEvaluatorBaseTy::VisitBinaryOperator(E); @@ -14138,61 +15404,23 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { APFloat &ResI = Result.getComplexFloatImag(); if (LHSReal) { assert(!RHSReal && "Cannot have two real operands for a complex op!"); - ResR = A * C; - ResI = A * D; + ResR = A; + ResI = A; + // ResR = A * C; + // ResI = A * D; + if (!handleFloatFloatBinOp(Info, E, ResR, BO_Mul, C) || + !handleFloatFloatBinOp(Info, E, ResI, BO_Mul, D)) + return false; } else if (RHSReal) { - ResR = C * A; - ResI = C * B; + // ResR = C * A; + // ResI = C * B; + ResR = C; + ResI = C; + if (!handleFloatFloatBinOp(Info, E, ResR, BO_Mul, A) || + !handleFloatFloatBinOp(Info, E, ResI, BO_Mul, B)) + return false; } else { - // In the fully general case, we need to handle NaNs and infinities - // robustly. - APFloat AC = A * C; - APFloat BD = B * D; - APFloat AD = A * D; - APFloat BC = B * C; - ResR = AC - BD; - ResI = AD + BC; - if (ResR.isNaN() && ResI.isNaN()) { - bool Recalc = false; - if (A.isInfinity() || B.isInfinity()) { - A = APFloat::copySign( - APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A); - B = APFloat::copySign( - APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B); - if (C.isNaN()) - C = APFloat::copySign(APFloat(C.getSemantics()), C); - if (D.isNaN()) - D = APFloat::copySign(APFloat(D.getSemantics()), D); - Recalc = true; - } - if (C.isInfinity() || D.isInfinity()) { - C = APFloat::copySign( - APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C); - D = APFloat::copySign( - APFloat(D.getSemantics(), D.isInfinity() ? 
1 : 0), D); - if (A.isNaN()) - A = APFloat::copySign(APFloat(A.getSemantics()), A); - if (B.isNaN()) - B = APFloat::copySign(APFloat(B.getSemantics()), B); - Recalc = true; - } - if (!Recalc && (AC.isInfinity() || BD.isInfinity() || - AD.isInfinity() || BC.isInfinity())) { - if (A.isNaN()) - A = APFloat::copySign(APFloat(A.getSemantics()), A); - if (B.isNaN()) - B = APFloat::copySign(APFloat(B.getSemantics()), B); - if (C.isNaN()) - C = APFloat::copySign(APFloat(C.getSemantics()), C); - if (D.isNaN()) - D = APFloat::copySign(APFloat(D.getSemantics()), D); - Recalc = true; - } - if (Recalc) { - ResR = APFloat::getInf(A.getSemantics()) * (A * C - B * D); - ResI = APFloat::getInf(A.getSemantics()) * (A * D + B * C); - } - } + HandleComplexComplexMul(A, B, C, D, ResR, ResI); } } else { ComplexValue LHS = Result; @@ -14218,46 +15446,19 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { APFloat &ResR = Result.getComplexFloatReal(); APFloat &ResI = Result.getComplexFloatImag(); if (RHSReal) { - ResR = A / C; - ResI = B / C; + ResR = A; + ResI = B; + // ResR = A / C; + // ResI = B / C; + if (!handleFloatFloatBinOp(Info, E, ResR, BO_Div, C) || + !handleFloatFloatBinOp(Info, E, ResI, BO_Div, C)) + return false; } else { if (LHSReal) { // No real optimizations we can do here, stub out with zero. B = APFloat::getZero(A.getSemantics()); } - int DenomLogB = 0; - APFloat MaxCD = maxnum(abs(C), abs(D)); - if (MaxCD.isFinite()) { - DenomLogB = ilogb(MaxCD); - C = scalbn(C, -DenomLogB, APFloat::rmNearestTiesToEven); - D = scalbn(D, -DenomLogB, APFloat::rmNearestTiesToEven); - } - APFloat Denom = C * C + D * D; - ResR = scalbn((A * C + B * D) / Denom, -DenomLogB, - APFloat::rmNearestTiesToEven); - ResI = scalbn((B * C - A * D) / Denom, -DenomLogB, - APFloat::rmNearestTiesToEven); - if (ResR.isNaN() && ResI.isNaN()) { - if (Denom.isPosZero() && (!A.isNaN() || !B.isNaN())) { - ResR = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * A; - ResI = APFloat::getInf(ResR.getSemantics(), C.isNegative()) * B; - } else if ((A.isInfinity() || B.isInfinity()) && C.isFinite() && - D.isFinite()) { - A = APFloat::copySign( - APFloat(A.getSemantics(), A.isInfinity() ? 1 : 0), A); - B = APFloat::copySign( - APFloat(B.getSemantics(), B.isInfinity() ? 1 : 0), B); - ResR = APFloat::getInf(ResR.getSemantics()) * (A * C + B * D); - ResI = APFloat::getInf(ResI.getSemantics()) * (B * C - A * D); - } else if (MaxCD.isInfinity() && A.isFinite() && B.isFinite()) { - C = APFloat::copySign( - APFloat(C.getSemantics(), C.isInfinity() ? 1 : 0), C); - D = APFloat::copySign( - APFloat(D.getSemantics(), D.isInfinity() ? 
1 : 0), D); - ResR = APFloat::getZero(ResR.getSemantics()) * (A * C + B * D); - ResI = APFloat::getZero(ResI.getSemantics()) * (B * C - A * D); - } - } + HandleComplexComplexDiv(A, B, C, D, ResR, ResI); } } else { if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0) @@ -14332,6 +15533,9 @@ bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) { } bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) { + if (!IsConstantEvaluatedBuiltinCall(E)) + return ExprEvaluatorBaseTy::VisitCallExpr(E); + switch (E->getBuiltinCallee()) { case Builtin::BI__builtin_complex: Result.makeComplexFloat(); @@ -14342,10 +15546,8 @@ bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) { return true; default: - break; + return false; } - - return ExprEvaluatorBaseTy::VisitCallExpr(E); } //===----------------------------------------------------------------------===// @@ -14380,6 +15582,9 @@ public: switch (E->getCastKind()) { default: return ExprEvaluatorBaseTy::VisitCastExpr(E); + case CK_NullToPointer: + VisitIgnoredValue(E->getSubExpr()); + return ZeroInitialization(E); case CK_NonAtomicToAtomic: return This ? EvaluateInPlace(Result, Info, *This, E->getSubExpr()) : Evaluate(Result, Info, E->getSubExpr()); @@ -14421,6 +15626,9 @@ public: } bool VisitCallExpr(const CallExpr *E) { + if (!IsConstantEvaluatedBuiltinCall(E)) + return ExprEvaluatorBaseTy::VisitCallExpr(E); + switch (E->getBuiltinCallee()) { case Builtin::BI__assume: case Builtin::BI__builtin_assume: @@ -14431,10 +15639,8 @@ public: return HandleOperatorDeleteCall(Info, E); default: - break; + return false; } - - return ExprEvaluatorBaseTy::VisitCallExpr(E); } bool VisitCXXDeleteExpr(const CXXDeleteExpr *E); @@ -14471,7 +15677,7 @@ bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) { return true; } - Optional<DynAlloc *> Alloc = CheckDeleteKind( + std::optional<DynAlloc *> Alloc = CheckDeleteKind( Info, E, Pointer, E->isArrayForm() ? DynAlloc::ArrayNew : DynAlloc::New); if (!Alloc) return false; @@ -14591,6 +15797,7 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) { E, Unqual, ScopeKind::FullExpression, LV); if (!EvaluateAtomic(E, &LV, Value, Info)) return false; + Result = Value; } else { if (!EvaluateAtomic(E, nullptr, Result, Info)) return false; @@ -14639,25 +15846,29 @@ static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This, /// lvalue-to-rvalue cast if it is an lvalue. static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) { assert(!E->isValueDependent()); + + if (E->getType().isNull()) + return false; + + if (!CheckLiteralType(Info, E)) + return false; + if (Info.EnableNewConstInterp) { if (!Info.Ctx.getInterpContext().evaluateAsRValue(Info, E, Result)) return false; - } else { - if (E->getType().isNull()) - return false; + return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result, + ConstantExprKind::Normal); + } - if (!CheckLiteralType(Info, E)) - return false; + if (!::Evaluate(Result, Info, E)) + return false; - if (!::Evaluate(Result, Info, E)) + // Implicit lvalue-to-rvalue cast. + if (E->isGLValue()) { + LValue LV; + LV.setFrom(Info.Ctx, Result); + if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result)) return false; - - if (E->isGLValue()) { - LValue LV; - LV.setFrom(Info.Ctx, Result); - if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result)) - return false; - } } // Check this core constant expression is a constant expression. 
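A minimal standalone sketch in plain doubles (an assumed example, not the APFloat-based code above) of why HandleComplexComplexMul needs its Recalc pass: once a NaN appears in a partial product, the textbook formula loses the infinity that C11 Annex G requires the result to keep.

    #include <cmath>
    #include <cstdio>

    int main() {
      // (inf + i*NaN) * (2 + i*0): the NaN poisons both naive components.
      double A = INFINITY, B = NAN, C = 2.0, D = 0.0;
      double ResR = A * C - B * D;        // inf - nan -> nan
      double ResI = A * D + B * C;        // nan + nan -> nan
      std::printf("naive:     (%g, %g)\n", ResR, ResI);

      // Recovery step: infinite parts become +/-1, the remaining parts of
      // that operand become +/-0, and the products are rescaled by infinity.
      double a = std::copysign(std::isinf(A) ? 1.0 : 0.0, A);
      double b = std::copysign(std::isinf(B) ? 1.0 : 0.0, B);
      ResR = INFINITY * (a * C - b * D);  // +inf: the infinity is preserved
      ResI = INFINITY * (a * D + b * C);  // inf * 0 -> nan; the complex value
                                          // is still infinite via the real part
      std::printf("recovered: (%g, %g)\n", ResR, ResI);
    }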
@@ -14677,6 +15888,26 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result, return true; } + if (const auto *L = dyn_cast<CXXBoolLiteralExpr>(Exp)) { + Result.Val = APValue(APSInt(APInt(1, L->getValue()))); + IsConst = true; + return true; + } + + if (const auto *CE = dyn_cast<ConstantExpr>(Exp)) { + if (CE->hasAPValueResult()) { + APValue APV = CE->getAPValueResult(); + if (!APV.isLValue()) { + Result.Val = std::move(APV); + IsConst = true; + return true; + } + } + + // The SubExpr is usually just an IntegerLiteral. + return FastEvaluateAsRValue(CE->getSubExpr(), Result, Ctx, IsConst); + } + // This case should be rare, but we need to check it before we check on // the type below. if (Exp->getType().isNull()) { @@ -14684,14 +15915,6 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result, return true; } - // FIXME: Evaluating values of large array and record types can cause - // performance problems. Only do so in C++11 for now. - if (Exp->isPRValue() && - (Exp->getType()->isArrayType() || Exp->getType()->isRecordType()) && - !Ctx.getLangOpts().CPlusPlus11) { - IsConst = false; - return true; - } return false; } @@ -14754,6 +15977,7 @@ bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext) const { assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsRValue"); EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects); Info.InConstantContext = InConstantContext; return ::EvaluateAsRValue(this, Result, Ctx, Info); @@ -14763,6 +15987,7 @@ bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx, bool InConstantContext) const { assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsBooleanCondition"); EvalResult Scratch; return EvaluateAsRValue(Scratch, Ctx, InConstantContext) && HandleConversionToBool(Scratch.Val, Result); @@ -14773,6 +15998,7 @@ bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext) const { assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsInt"); EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects); Info.InConstantContext = InConstantContext; return ::EvaluateAsInt(this, Result, Ctx, AllowSideEffects, Info); @@ -14783,6 +16009,7 @@ bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext) const { assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFixedPoint"); EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects); Info.InConstantContext = InConstantContext; return ::EvaluateAsFixedPoint(this, Result, Ctx, AllowSideEffects, Info); @@ -14797,6 +16024,7 @@ bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx, if (!getType()->isRealFloatingType()) return false; + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFloat"); EvalResult ExprResult; if (!EvaluateAsRValue(ExprResult, Ctx, InConstantContext) || !ExprResult.Val.isFloat() || @@ -14812,6 +16040,7 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsLValue"); 
EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold); Info.InConstantContext = InConstantContext; LValue LV; @@ -14855,11 +16084,22 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx, ConstantExprKind Kind) const { assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + bool IsConst; + if (FastEvaluateAsRValue(this, Result, Ctx, IsConst) && Result.Val.hasValue()) + return true; + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsConstantExpr"); EvalInfo::EvaluationMode EM = EvalInfo::EM_ConstantExpression; EvalInfo Info(Ctx, Result, EM); Info.InConstantContext = true; + if (Info.EnableNewConstInterp) { + if (!Info.Ctx.getInterpContext().evaluate(Info, this, Result.Val)) + return false; + return CheckConstantExpression(Info, getExprLoc(), + getStorageType(Ctx, this), Result.Val, Kind); + } + // The type of the object we're initializing is 'const T' for a class NTTP. QualType T = getType(); if (Kind == ConstantExprKind::ClassTemplateArgument) @@ -14870,16 +16110,26 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx, // this doesn't escape. MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr*>(this), true); APValue::LValueBase Base(&BaseMTE); - Info.setEvaluatingDecl(Base, Result.Val); - LValue LVal; - LVal.set(Base); - if (!::EvaluateInPlace(Result.Val, Info, LVal, this) || Result.HasSideEffects) - return false; + if (Info.EnableNewConstInterp) { + if (!Info.Ctx.getInterpContext().evaluateAsRValue(Info, this, Result.Val)) + return false; + } else { + LValue LVal; + LVal.set(Base); + // C++23 [intro.execution]/p5 + // A full-expression is [...] a constant-expression + // So we need to make sure temporary objects are destroyed after having + // evaluated the expression (per C++23 [class.temporary]/p4). + FullExpressionRAII Scope(Info); + if (!::EvaluateInPlace(Result.Val, Info, LVal, this) || + Result.HasSideEffects || !Scope.destroy()) + return false; - if (!Info.discardCleanups()) - llvm_unreachable("Unhandled cleanup; missing full expression marker?"); + if (!Info.discardCleanups()) + llvm_unreachable("Unhandled cleanup; missing full expression marker?"); + } if (!CheckConstantExpression(Info, getExprLoc(), getStorageType(Ctx, this), Result.Val, Kind)) @@ -14903,24 +16153,28 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx, bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx, const VarDecl *VD, - SmallVectorImpl<PartialDiagnosticAt> &Notes) const { + SmallVectorImpl<PartialDiagnosticAt> &Notes, + bool IsConstantInitialization) const { assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); - // FIXME: Evaluating initializers for large array and record types can cause - // performance problems. Only do so in C++11 for now. - if (isPRValue() && (getType()->isArrayType() || getType()->isRecordType()) && - !Ctx.getLangOpts().CPlusPlus11) - return false; + llvm::TimeTraceScope TimeScope("EvaluateAsInitializer", [&] { + std::string Name; + llvm::raw_string_ostream OS(Name); + VD->printQualifiedName(OS); + return Name; + }); Expr::EvalStatus EStatus; EStatus.Diag = &Notes; - EvalInfo Info(Ctx, EStatus, VD->isConstexpr() - ? EvalInfo::EM_ConstantExpression - : EvalInfo::EM_ConstantFold); + EvalInfo Info(Ctx, EStatus, + (IsConstantInitialization && + (Ctx.getLangOpts().CPlusPlus || Ctx.getLangOpts().C23)) + ?
EvalInfo::EM_ConstantExpression + : EvalInfo::EM_ConstantFold); Info.setEvaluatingDecl(VD, Value); - Info.InConstantContext = true; + Info.InConstantContext = IsConstantInitialization; SourceLocation DeclLoc = VD->getLocation(); QualType DeclTy = VD->getType(); @@ -14929,14 +16183,29 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx, auto &InterpCtx = const_cast<ASTContext &>(Ctx).getInterpContext(); if (!InterpCtx.evaluateAsInitializer(Info, VD, Value)) return false; + + return CheckConstantExpression(Info, DeclLoc, DeclTy, Value, + ConstantExprKind::Normal); } else { LValue LVal; LVal.set(VD); - if (!EvaluateInPlace(Value, Info, LVal, this, - /*AllowNonLiteralTypes=*/true) || - EStatus.HasSideEffects) - return false; + { + // C++23 [intro.execution]/p5 + // A full-expression is ... an init-declarator ([dcl.decl]) or a + // mem-initializer. + // So we need to make sure temporary objects are destroyed after having + // evaluated the expression (per C++23 [class.temporary]/p4). + // + // FIXME: Otherwise this may break test/Modules/pr68702.cpp because the + // serialization code calls ParmVarDecl::getDefaultArg() which strips the + // outermost FullExpr, such as ExprWithCleanups. + FullExpressionRAII Scope(Info); + if (!EvaluateInPlace(Value, Info, LVal, this, + /*AllowNonLiteralTypes=*/true) || + EStatus.HasSideEffects) + return false; + } // At this point, any lifetime-extended temporaries are completely // initialized. @@ -14945,6 +16214,7 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx, if (!Info.discardCleanups()) llvm_unreachable("Unhandled cleanup; missing full expression marker?"); } + return CheckConstantExpression(Info, DeclLoc, DeclTy, Value, ConstantExprKind::Normal) && CheckMemoryLeaks(Info); @@ -14965,7 +16235,7 @@ bool VarDecl::evaluateDestruction( APValue DestroyedValue; if (getEvaluatedValue() && !getEvaluatedValue()->isAbsent()) DestroyedValue = *getEvaluatedValue(); - else if (!getDefaultInitValue(getType(), DestroyedValue)) + else if (!handleDefaultInitValue(getType(), DestroyedValue)) return false; if (!EvaluateDestruction(getASTContext(), this, std::move(DestroyedValue), @@ -14994,6 +16264,7 @@ APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx, assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstInt"); EvalResult EVResult; EVResult.Diag = Diag; EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects); @@ -15012,6 +16283,7 @@ APSInt Expr::EvaluateKnownConstIntCheckOverflow( assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstIntCheckOverflow"); EvalResult EVResult; EVResult.Diag = Diag; EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects); @@ -15030,6 +16302,7 @@ void Expr::EvaluateForOverflow(const ASTContext &Ctx) const { assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateForOverflow"); bool IsConst; EvalResult EVResult; if (!FastEvaluateAsRValue(this, EVResult, Ctx, IsConst)) { @@ -15114,7 +16387,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { case Expr::StringLiteralClass: case Expr::ArraySubscriptExprClass: case Expr::MatrixSubscriptExprClass: - case Expr::OMPArraySectionExprClass: + case Expr::ArraySectionExprClass: case Expr::OMPArrayShapingExprClass: case 
Expr::OMPIteratorExprClass: case Expr::MemberExprClass: @@ -15191,6 +16464,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { case Expr::DependentCoawaitExprClass: case Expr::CoyieldExprClass: case Expr::SYCLUniqueStableNameExprClass: + case Expr::CXXParenListInitExprClass: return ICEDiag(IK_NotICE, E->getBeginLoc()); case Expr::InitListExprClass: { @@ -15207,8 +16481,12 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { case Expr::SizeOfPackExprClass: case Expr::GNUNullExprClass: case Expr::SourceLocExprClass: + case Expr::EmbedExprClass: return NoDiag(); + case Expr::PackIndexingExprClass: + return CheckICE(cast<PackIndexingExpr>(E)->getSelectedExpr(), Ctx); + case Expr::SubstNonTypeTemplateParmExprClass: return CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx); @@ -15357,7 +16635,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) { llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx); if (REval == 0) return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc()); - if (REval.isSigned() && REval.isAllOnesValue()) { + if (REval.isSigned() && REval.isAllOnes()) { llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx); if (LEval.isMinSignedValue()) return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc()); @@ -15521,6 +16799,8 @@ bool Expr::isIntegerConstantExpr(const ASTContext &Ctx, assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + ExprTimeTraceScope TimeScope(this, Ctx, "isIntegerConstantExpr"); + if (Ctx.getLangOpts().CPlusPlus11) return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, nullptr, Loc); @@ -15532,22 +16812,23 @@ bool Expr::isIntegerConstantExpr(const ASTContext &Ctx, return true; } -Optional<llvm::APSInt> Expr::getIntegerConstantExpr(const ASTContext &Ctx, - SourceLocation *Loc, - bool isEvaluated) const { - assert(!isValueDependent() && - "Expression evaluator can't be called on a dependent expression."); +std::optional<llvm::APSInt> +Expr::getIntegerConstantExpr(const ASTContext &Ctx, SourceLocation *Loc) const { + if (isValueDependent()) { + // Expression evaluator can't succeed on a dependent expression. + return std::nullopt; + } APSInt Value; if (Ctx.getLangOpts().CPlusPlus11) { if (EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc)) return Value; - return None; + return std::nullopt; } if (!isIntegerConstantExpr(Ctx, Loc)) - return None; + return std::nullopt; // The only possible side-effects here are due to UB discovered in the // evaluation (for instance, INT_MAX + 1). 
In such a case, we are still @@ -15611,6 +16892,14 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx, assert(!isValueDependent() && "Expression evaluator can't be called on a dependent expression."); + llvm::TimeTraceScope TimeScope("EvaluateWithSubstitution", [&] { + std::string Name; + llvm::raw_string_ostream OS(Name); + Callee->getNameForDiagnostic(OS, Ctx.getPrintingPolicy(), + /*Qualified=*/true); + return Name; + }); + Expr::EvalStatus Status; EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated); Info.InConstantContext = true; @@ -15621,7 +16910,8 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx, #ifndef NDEBUG auto *MD = dyn_cast<CXXMethodDecl>(Callee); assert(MD && "Don't provide `this` for non-methods."); - assert(!MD->isStatic() && "Don't provide `this` for static methods."); + assert(MD->isImplicitObjectMemberFunction() && + "Don't provide `this` for methods without an implicit object."); #endif if (!This->isValueDependent() && EvaluateObjectArgument(Info, This, ThisVal) && @@ -15659,7 +16949,8 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx, Info.EvalStatus.HasSideEffects = false; // Build fake call to Callee. - CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, Call); + CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, This, + Call); // FIXME: Missing ExprWithCleanups in enable_if conditions? FullExpressionRAII Scope(Info); return Evaluate(Value, Info, this) && Scope.destroy() && @@ -15675,6 +16966,14 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD, if (FD->isDependentContext()) return true; + llvm::TimeTraceScope TimeScope("isPotentialConstantExpr", [&] { + std::string Name; + llvm::raw_string_ostream OS(Name); + FD->getNameForDiagnostic(OS, FD->getASTContext().getPrintingPolicy(), + /*Qualified=*/true); + return Name; + }); + Expr::EvalStatus Status; Status.Diag = &Diags; @@ -15707,8 +17006,10 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD, HandleConstructorCall(&VIE, This, Args, CD, Info, Scratch); } else { SourceLocation Loc = FD->getLocation(); - HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr, - Args, CallRef(), FD->getBody(), Info, Scratch, nullptr); + HandleFunctionCall( + Loc, FD, (MD && MD->isImplicitObjectMemberFunction()) ? &This : nullptr, + &VIE, Args, CallRef(), FD->getBody(), Info, Scratch, + /*ResultSlot=*/nullptr); } return Diags.empty(); @@ -15730,7 +17031,8 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E, Info.CheckingPotentialConstantExpression = true; // Fabricate a call stack frame to give the arguments a plausible cover story. - CallStackFrame Frame(Info, SourceLocation(), FD, /*This*/ nullptr, CallRef()); + CallStackFrame Frame(Info, SourceLocation(), FD, /*This=*/nullptr, + /*CallExpr=*/nullptr, CallRef()); APValue ResultScratch; Evaluate(ResultScratch, Info, E); @@ -15746,3 +17048,111 @@ bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold); return tryEvaluateBuiltinObjectSize(this, Type, Info, Result); } + +static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result, + EvalInfo &Info, std::string *StringResult) { + if (!E->getType()->hasPointerRepresentation() || !E->isPRValue()) + return false; + + LValue String; + + if (!EvaluatePointer(E, String, Info)) + return false; + + QualType CharTy = E->getType()->getPointeeType(); + + // Fast path: if it's a string literal, search the string value. 
+ if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>( + String.getLValueBase().dyn_cast<const Expr *>())) { + StringRef Str = S->getBytes(); + int64_t Off = String.Offset.getQuantity(); + if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() && + S->getCharByteWidth() == 1 && + // FIXME: Add fast-path for wchar_t too. + Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) { + Str = Str.substr(Off); + + StringRef::size_type Pos = Str.find(0); + if (Pos != StringRef::npos) + Str = Str.substr(0, Pos); + + Result = Str.size(); + if (StringResult) + *StringResult = Str; + return true; + } + + // Fall through to slow path. + } + + // Slow path: scan the bytes of the string looking for the terminating 0. + for (uint64_t Strlen = 0; /**/; ++Strlen) { + APValue Char; + if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) || + !Char.isInt()) + return false; + if (!Char.getInt()) { + Result = Strlen; + return true; + } else if (StringResult) + StringResult->push_back(Char.getInt().getExtValue()); + if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1)) + return false; + } +} + +std::optional<std::string> Expr::tryEvaluateString(ASTContext &Ctx) const { + Expr::EvalStatus Status; + EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold); + uint64_t Result; + std::string StringResult; + + if (EvaluateBuiltinStrLen(this, Result, Info, &StringResult)) + return StringResult; + return {}; +} + +bool Expr::EvaluateCharRangeAsString(std::string &Result, + const Expr *SizeExpression, + const Expr *PtrExpression, ASTContext &Ctx, + EvalResult &Status) const { + LValue String; + EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression); + Info.InConstantContext = true; + + FullExpressionRAII Scope(Info); + APSInt SizeValue; + if (!::EvaluateInteger(SizeExpression, SizeValue, Info)) + return false; + + uint64_t Size = SizeValue.getZExtValue(); + + if (!::EvaluatePointer(PtrExpression, String, Info)) + return false; + + QualType CharTy = PtrExpression->getType()->getPointeeType(); + for (uint64_t I = 0; I < Size; ++I) { + APValue Char; + if (!handleLValueToRValueConversion(Info, PtrExpression, CharTy, String, + Char)) + return false; + + APSInt C = Char.getInt(); + Result.push_back(static_cast<char>(C.getExtValue())); + if (!HandleLValueArrayAdjustment(Info, PtrExpression, String, CharTy, 1)) + return false; + } + if (!Scope.destroy()) + return false; + + if (!CheckMemoryLeaks(Info)) + return false; + + return true; +} + +bool Expr::tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const { + Expr::EvalStatus Status; + EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold); + return EvaluateBuiltinStrLen(this, Result, Info); +} diff --git a/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp b/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp index 7d932c8b059d..a3222c2da24f 100644 --- a/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp +++ b/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp @@ -271,20 +271,7 @@ QualType ObjCMessageExpr::getCallReturnType(ASTContext &Ctx) const { } return QT; } - - // Expression type might be different from an expected call return type, - // as expression type would never be a reference even if call returns a - // reference. Reconstruct the original expression type. 
- QualType QT = getType(); - switch (getValueKind()) { - case VK_LValue: - return Ctx.getLValueReferenceType(QT); - case VK_XValue: - return Ctx.getRValueReferenceType(QT); - case VK_PRValue: - return QT; - } - llvm_unreachable("Unsupported ExprValueKind"); + return Ctx.getReferenceQualifiedType(this); } SourceRange ObjCMessageExpr::getReceiverRange() const { diff --git a/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp b/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp index c7789b707b21..8bad3b36244e 100644 --- a/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp +++ b/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp @@ -187,10 +187,7 @@ public: /// Implements the ASTImporter interface for tracking back a declaration /// to its original declaration it came from. Decl *GetOriginalDecl(Decl *To) override { - auto It = ToOrigin.find(To); - if (It != ToOrigin.end()) - return It->second; - return nullptr; + return ToOrigin.lookup(To); } /// Whenever a DeclContext is imported, ensure that ExternalASTSource's origin @@ -425,16 +422,14 @@ void ExternalASTMerger::RemoveSources(llvm::ArrayRef<ImporterSource> Sources) { logs() << "(ExternalASTMerger*)" << (void *)this << " removing source (ASTContext*)" << (void *)&S.getASTContext() << "\n"; - Importers.erase( - std::remove_if(Importers.begin(), Importers.end(), - [&Sources](std::unique_ptr<ASTImporter> &Importer) -> bool { - for (const ImporterSource &S : Sources) { - if (&Importer->getFromContext() == &S.getASTContext()) - return true; - } - return false; - }), - Importers.end()); + llvm::erase_if(Importers, + [&Sources](std::unique_ptr<ASTImporter> &Importer) -> bool { + for (const ImporterSource &S : Sources) { + if (&Importer->getFromContext() == &S.getASTContext()) + return true; + } + return false; + }); for (OriginMap::iterator OI = Origins.begin(), OE = Origins.end(); OI != OE; ) { std::pair<const DeclContext *, DCOrigin> Origin = *OI; bool Erase = false; @@ -543,4 +538,3 @@ void ExternalASTMerger::FindExternalLexicalDecls( return false; }); } - diff --git a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp index 257833182621..a5b6f80bde69 100644 --- a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp +++ b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp @@ -15,14 +15,14 @@ #include "clang/AST/ExternalASTSource.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclarationName.h" +#include "clang/Basic/ASTSourceDescriptor.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" -#include "clang/Basic/Module.h" #include "clang/Basic/SourceManager.h" -#include "llvm/ADT/None.h" #include "llvm/Support/ErrorHandling.h" #include <cstdint> +#include <optional> using namespace clang; @@ -30,9 +30,9 @@ char ExternalASTSource::ID; ExternalASTSource::~ExternalASTSource() = default; -llvm::Optional<ASTSourceDescriptor> +std::optional<ASTSourceDescriptor> ExternalASTSource::getSourceDescriptor(unsigned ID) { - return None; + return std::nullopt; } ExternalASTSource::ExtKind @@ -68,9 +68,7 @@ bool ExternalASTSource::layoutRecordType( return false; } -Decl *ExternalASTSource::GetExternalDecl(uint32_t ID) { - return nullptr; -} +Decl *ExternalASTSource::GetExternalDecl(GlobalDeclID ID) { return nullptr; } Selector ExternalASTSource::GetExternalSelector(uint32_t ID) { return Selector(); diff --git a/contrib/llvm-project/clang/lib/AST/FormatString.cpp 
b/contrib/llvm-project/clang/lib/AST/FormatString.cpp index 83b952116a5e..da8164bad518 100644 --- a/contrib/llvm-project/clang/lib/AST/FormatString.cpp +++ b/contrib/llvm-project/clang/lib/AST/FormatString.cpp @@ -15,13 +15,13 @@ #include "clang/Basic/LangOptions.h" #include "clang/Basic/TargetInfo.h" #include "llvm/Support/ConvertUTF.h" +#include <optional> using clang::analyze_format_string::ArgType; using clang::analyze_format_string::FormatStringHandler; using clang::analyze_format_string::FormatSpecifier; using clang::analyze_format_string::LengthModifier; using clang::analyze_format_string::OptionalAmount; -using clang::analyze_format_string::PositionContext; using clang::analyze_format_string::ConversionSpecifier; using namespace clang; @@ -322,6 +322,12 @@ bool clang::analyze_format_string::ParseUTF8InvalidSpecifier( clang::analyze_format_string::ArgType::MatchKind ArgType::matchesType(ASTContext &C, QualType argTy) const { + // When using the format attribute in C++, you can receive a function or an + // array that will necessarily decay to a pointer when passed to the final + // format consumer. Apply decay before type comparison. + if (argTy->canDecayToPointerType()) + argTy = C.getDecayedType(argTy); + if (Ptr) { // It has to be a pointer. const PointerType *PT = argTy->getAs<PointerType>(); @@ -343,72 +349,174 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const { return Match; case AnyCharTy: { - if (const EnumType *ETy = argTy->getAs<EnumType>()) { + if (const auto *ETy = argTy->getAs<EnumType>()) { // If the enum is incomplete we know nothing about the underlying type. - // Assume that it's 'int'. + // Assume that it's 'int'. Do not use the underlying type for a scoped + // enumeration. if (!ETy->getDecl()->isComplete()) return NoMatch; - argTy = ETy->getDecl()->getIntegerType(); + if (ETy->isUnscopedEnumerationType()) + argTy = ETy->getDecl()->getIntegerType(); } - if (const BuiltinType *BT = argTy->getAs<BuiltinType>()) + if (const auto *BT = argTy->getAs<BuiltinType>()) { + // The types are perfectly matched? switch (BT->getKind()) { + default: + break; + case BuiltinType::Char_S: + case BuiltinType::SChar: + case BuiltinType::UChar: + case BuiltinType::Char_U: + return Match; + case BuiltinType::Bool: + if (!Ptr) + return Match; + break; + } + // "Partially matched" because of promotions? + if (!Ptr) { + switch (BT->getKind()) { default: break; - case BuiltinType::Char_S: - case BuiltinType::SChar: - case BuiltinType::UChar: - case BuiltinType::Char_U: - case BuiltinType::Bool: - return Match; + case BuiltinType::Int: + case BuiltinType::UInt: + return MatchPromotion; + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + return NoMatchPromotionTypeConfusion; + } } + } return NoMatch; } case SpecificTy: { if (const EnumType *ETy = argTy->getAs<EnumType>()) { // If the enum is incomplete we know nothing about the underlying type. - // Assume that it's 'int'. + // Assume that it's 'int'. Do not use the underlying type for a scoped + // enumeration as that needs an exact match. if (!ETy->getDecl()->isComplete()) argTy = C.IntTy; - else + else if (ETy->isUnscopedEnumerationType()) argTy = ETy->getDecl()->getIntegerType(); } + + if (argTy->isSaturatedFixedPointType()) + argTy = C.getCorrespondingUnsaturatedType(argTy); + argTy = C.getCanonicalType(argTy).getUnqualifiedType(); if (T == argTy) return Match; - // Check for "compatible types". 
- if (const BuiltinType *BT = argTy->getAs<BuiltinType>()) + if (const auto *BT = argTy->getAs<BuiltinType>()) { + // Check if the only difference between them is signed vs unsigned + // if true, return match signedness. switch (BT->getKind()) { default: break; + case BuiltinType::Bool: + if (Ptr && (T == C.UnsignedCharTy || T == C.SignedCharTy)) + return NoMatch; + [[fallthrough]]; case BuiltinType::Char_S: case BuiltinType::SChar: + if (T == C.UnsignedShortTy || T == C.ShortTy) + return NoMatchTypeConfusion; + if (T == C.UnsignedCharTy) + return NoMatchSignedness; + if (T == C.SignedCharTy) + return Match; + break; case BuiltinType::Char_U: case BuiltinType::UChar: - case BuiltinType::Bool: if (T == C.UnsignedShortTy || T == C.ShortTy) return NoMatchTypeConfusion; - return T == C.UnsignedCharTy || T == C.SignedCharTy ? Match - : NoMatch; + if (T == C.UnsignedCharTy) + return Match; + if (T == C.SignedCharTy) + return NoMatchSignedness; + break; case BuiltinType::Short: - return T == C.UnsignedShortTy ? Match : NoMatch; + if (T == C.UnsignedShortTy) + return NoMatchSignedness; + break; case BuiltinType::UShort: - return T == C.ShortTy ? Match : NoMatch; + if (T == C.ShortTy) + return NoMatchSignedness; + break; case BuiltinType::Int: - return T == C.UnsignedIntTy ? Match : NoMatch; + if (T == C.UnsignedIntTy) + return NoMatchSignedness; + break; case BuiltinType::UInt: - return T == C.IntTy ? Match : NoMatch; + if (T == C.IntTy) + return NoMatchSignedness; + break; case BuiltinType::Long: - return T == C.UnsignedLongTy ? Match : NoMatch; + if (T == C.UnsignedLongTy) + return NoMatchSignedness; + break; case BuiltinType::ULong: - return T == C.LongTy ? Match : NoMatch; + if (T == C.LongTy) + return NoMatchSignedness; + break; case BuiltinType::LongLong: - return T == C.UnsignedLongLongTy ? Match : NoMatch; + if (T == C.UnsignedLongLongTy) + return NoMatchSignedness; + break; case BuiltinType::ULongLong: - return T == C.LongLongTy ? Match : NoMatch; - } + if (T == C.LongLongTy) + return NoMatchSignedness; + break; + } + // "Partially matched" because of promotions? + if (!Ptr) { + switch (BT->getKind()) { + default: + break; + case BuiltinType::Bool: + if (T == C.IntTy || T == C.UnsignedIntTy) + return MatchPromotion; + break; + case BuiltinType::Int: + case BuiltinType::UInt: + if (T == C.SignedCharTy || T == C.UnsignedCharTy || + T == C.ShortTy || T == C.UnsignedShortTy || T == C.WCharTy || + T == C.WideCharTy) + return MatchPromotion; + break; + case BuiltinType::Char_U: + if (T == C.UnsignedIntTy) + return MatchPromotion; + if (T == C.UnsignedShortTy) + return NoMatchPromotionTypeConfusion; + break; + case BuiltinType::Char_S: + if (T == C.IntTy) + return MatchPromotion; + if (T == C.ShortTy) + return NoMatchPromotionTypeConfusion; + break; + case BuiltinType::Half: + case BuiltinType::Float: + if (T == C.DoubleTy) + return MatchPromotion; + break; + case BuiltinType::Short: + case BuiltinType::UShort: + if (T == C.SignedCharTy || T == C.UnsignedCharTy) + return NoMatchPromotionTypeConfusion; + break; + case BuiltinType::WChar_U: + case BuiltinType::WChar_S: + if (T != C.WCharTy && T != C.WideCharTy) + return NoMatchPromotionTypeConfusion; + } + } + } return NoMatch; } @@ -446,7 +554,7 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const { if (C.getCanonicalType(argTy).getUnqualifiedType() == WInt) return Match; - QualType PromoArg = argTy->isPromotableIntegerType() + QualType PromoArg = C.isPromotableIntegerType(argTy) ? 
C.getPromotedIntegerType(argTy) : argTy; PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType(); @@ -619,6 +727,8 @@ analyze_format_string::LengthModifier::toString() const { const char *ConversionSpecifier::toString() const { switch (kind) { + case bArg: return "b"; + case BArg: return "B"; case dArg: return "d"; case DArg: return "D"; case iArg: return "i"; @@ -664,17 +774,27 @@ const char *ConversionSpecifier::toString() const { // MS specific specifiers. case ZArg: return "Z"; + + // ISO/IEC TR 18037 (fixed-point) specific specifiers. + case rArg: + return "r"; + case RArg: + return "R"; + case kArg: + return "k"; + case KArg: + return "K"; } return nullptr; } -Optional<ConversionSpecifier> +std::optional<ConversionSpecifier> ConversionSpecifier::getStandardSpecifier() const { ConversionSpecifier::Kind NewKind; switch (getKind()) { default: - return None; + return std::nullopt; case DArg: NewKind = dArg; break; @@ -728,6 +848,9 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target, if (LO.OpenCL && CS.isDoubleArg()) return !VectorNumElts.isInvalid(); + if (CS.isFixedPointArg()) + return true; + if (Target.getTriple().isOSMSVCRT()) { switch (CS.getKind()) { case ConversionSpecifier::cArg: @@ -740,7 +863,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target, break; } } - LLVM_FALLTHROUGH; + [[fallthrough]]; case LengthModifier::AsChar: case LengthModifier::AsLongLong: case LengthModifier::AsQuad: @@ -748,6 +871,8 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target, case LengthModifier::AsSizeT: case LengthModifier::AsPtrDiff: switch (CS.getKind()) { + case ConversionSpecifier::bArg: + case ConversionSpecifier::BArg: case ConversionSpecifier::dArg: case ConversionSpecifier::DArg: case ConversionSpecifier::iArg: @@ -761,7 +886,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target, return true; case ConversionSpecifier::FreeBSDrArg: case ConversionSpecifier::FreeBSDyArg: - return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4(); + return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS(); default: return false; } @@ -778,7 +903,12 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target, return true; } + if (CS.isFixedPointArg()) + return true; + switch (CS.getKind()) { + case ConversionSpecifier::bArg: + case ConversionSpecifier::BArg: case ConversionSpecifier::dArg: case ConversionSpecifier::DArg: case ConversionSpecifier::iArg: @@ -796,7 +926,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target, return true; case ConversionSpecifier::FreeBSDrArg: case ConversionSpecifier::FreeBSDyArg: - return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4(); + return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS(); default: return false; } @@ -903,6 +1033,8 @@ bool FormatSpecifier::hasStandardLengthModifier() const { bool FormatSpecifier::hasStandardConversionSpecifier( const LangOptions &LangOpt) const { switch (CS.getKind()) { + case ConversionSpecifier::bArg: + case ConversionSpecifier::BArg: case ConversionSpecifier::cArg: case ConversionSpecifier::dArg: case ConversionSpecifier::iArg: @@ -940,6 +1072,11 @@ bool FormatSpecifier::hasStandardConversionSpecifier( case ConversionSpecifier::UArg: case ConversionSpecifier::ZArg: return false; + case ConversionSpecifier::rArg: + case ConversionSpecifier::RArg: + case ConversionSpecifier::kArg: + case ConversionSpecifier::KArg: + return LangOpt.FixedPoint; } 
llvm_unreachable("Invalid ConversionSpecifier Kind!"); } @@ -961,7 +1098,8 @@ bool FormatSpecifier::hasStandardLengthConversionCombination() const { return true; } -Optional<LengthModifier> FormatSpecifier::getCorrectedLengthModifier() const { +std::optional<LengthModifier> +FormatSpecifier::getCorrectedLengthModifier() const { if (CS.isAnyIntArg() || CS.getKind() == ConversionSpecifier::nArg) { if (LM.getKind() == LengthModifier::AsLongDouble || LM.getKind() == LengthModifier::AsQuad) { @@ -971,15 +1109,14 @@ Optional<LengthModifier> FormatSpecifier::getCorrectedLengthModifier() const { } } - return None; + return std::nullopt; } bool FormatSpecifier::namedTypeToLengthModifier(QualType QT, LengthModifier &LM) { - assert(isa<TypedefType>(QT) && "Expected a TypedefType"); - const TypedefNameDecl *Typedef = cast<TypedefType>(QT)->getDecl(); - - for (;;) { + for (/**/; const auto *TT = QT->getAs<TypedefType>(); + QT = TT->getDecl()->getUnderlyingType()) { + const TypedefNameDecl *Typedef = TT->getDecl(); const IdentifierInfo *Identifier = Typedef->getIdentifier(); if (Identifier->getName() == "size_t") { LM.setKind(LengthModifier::AsSizeT); @@ -998,12 +1135,6 @@ bool FormatSpecifier::namedTypeToLengthModifier(QualType QT, LM.setKind(LengthModifier::AsPtrDiff); return true; } - - QualType T = Typedef->getUnderlyingType(); - if (!isa<TypedefType>(T)) - break; - - Typedef = cast<TypedefType>(T)->getDecl(); } return false; } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h index 2baa717311bc..23f728603676 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h @@ -22,17 +22,15 @@ namespace clang { namespace interp { /// Wrapper around boolean types. -class Boolean { +class Boolean final { private: /// Underlying boolean. bool V; - /// Construct a wrapper from a boolean. - explicit Boolean(bool V) : V(V) {} - public: /// Zero-initializes a boolean. Boolean() : V(false) {} + explicit Boolean(bool V) : V(V) {} bool operator<(Boolean RHS) const { return V < RHS.V; } bool operator>(Boolean RHS) const { return V > RHS.V; } @@ -44,11 +42,13 @@ class Boolean { bool operator>(unsigned RHS) const { return static_cast<unsigned>(V) > RHS; } Boolean operator-() const { return Boolean(V); } + Boolean operator-(const Boolean &Other) const { return Boolean(V - Other.V); } Boolean operator~() const { return Boolean(true); } - explicit operator unsigned() const { return V; } - explicit operator int64_t() const { return V; } - explicit operator uint64_t() const { return V; } + template <typename Ty, typename = std::enable_if_t<std::is_integral_v<Ty>>> + explicit operator Ty() const { + return V; + } APSInt toAPSInt() const { return APSInt(APInt(1, static_cast<uint64_t>(V), false), true); @@ -56,11 +56,11 @@ class Boolean { APSInt toAPSInt(unsigned NumBits) const { return APSInt(toAPSInt().zextOrTrunc(NumBits), true); } - APValue toAPValue() const { return APValue(toAPSInt()); } + APValue toAPValue(const ASTContext &) const { return APValue(toAPSInt()); } Boolean toUnsigned() const { return *this; } - constexpr static unsigned bitWidth() { return true; } + constexpr static unsigned bitWidth() { return 1; } bool isZero() const { return !V; } bool isMin() const { return isZero(); } @@ -80,13 +80,20 @@ class Boolean { Boolean truncate(unsigned TruncBits) const { return *this; } void print(llvm::raw_ostream &OS) const { OS << (V ? 
"true" : "false"); } + std::string toDiagnosticString(const ASTContext &Ctx) const { + std::string NameStr; + llvm::raw_string_ostream OS(NameStr); + print(OS); + return NameStr; + } static Boolean min(unsigned NumBits) { return Boolean(false); } static Boolean max(unsigned NumBits) { return Boolean(true); } - template <typename T> - static std::enable_if_t<std::is_integral<T>::value, Boolean> from(T Value) { - return Boolean(Value != 0); + template <typename T> static Boolean from(T Value) { + if constexpr (std::is_integral<T>::value) + return Boolean(Value != 0); + return Boolean(static_cast<decltype(Boolean::V)>(Value) != 0); } template <unsigned SrcBits, bool SrcSign> @@ -95,11 +102,6 @@ class Boolean { return Boolean(!Value.isZero()); } - template <bool SrcSign> - static Boolean from(Integral<0, SrcSign> Value) { - return Boolean(!Value.isZero()); - } - static Boolean zero() { return from(false); } template <typename T> @@ -134,6 +136,16 @@ class Boolean { *R = Boolean(A.V && B.V); return false; } + + static bool inv(Boolean A, Boolean *R) { + *R = Boolean(!A.V); + return false; + } + + static bool neg(Boolean A, Boolean *R) { + *R = Boolean(A.V); + return false; + } }; inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Boolean &B) { diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp index 7a4569820a1d..fee4432a8f66 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp @@ -8,73 +8,190 @@ #include "ByteCodeEmitter.h" #include "Context.h" +#include "Floating.h" +#include "IntegralAP.h" #include "Opcode.h" #include "Program.h" +#include "clang/AST/ASTLambda.h" +#include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" +#include "clang/Basic/Builtins.h" +#include <type_traits> using namespace clang; using namespace clang::interp; -using APSInt = llvm::APSInt; -using Error = llvm::Error; +/// Unevaluated builtins don't get their arguments put on the stack +/// automatically. They instead operate on the AST of their Call +/// Expression. +/// Similar information is available via ASTContext::BuiltinInfo, +/// but that is not correct for our use cases. +static bool isUnevaluatedBuiltin(unsigned BuiltinID) { + return BuiltinID == Builtin::BI__builtin_classify_type || + BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; +} + +Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) { -Expected<Function *> ByteCodeEmitter::compileFunc(const FunctionDecl *F) { - // Do not try to compile undefined functions. - if (!F->isDefined(F) || (!F->hasBody() && F->willHaveBody())) + // Manually created functions that haven't been assigned proper + // parameters yet. + if (!FuncDecl->param_empty() && !FuncDecl->param_begin()) return nullptr; + bool IsLambdaStaticInvoker = false; + if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl); + MD && MD->isLambdaStaticInvoker()) { + // For a lambda static invoker, we might have to pick a specialized + // version if the lambda is generic. In that case, the picked function + // will *NOT* be a static invoker anymore. However, it will still + // be a non-static member function, this (usually) requiring an + // instance pointer. We suppress that later in this function. 
+ IsLambdaStaticInvoker = true; + + const CXXRecordDecl *ClosureClass = MD->getParent(); + assert(ClosureClass->captures_begin() == ClosureClass->captures_end()); + if (ClosureClass->isGenericLambda()) { + const CXXMethodDecl *LambdaCallOp = ClosureClass->getLambdaCallOperator(); + assert(MD->isFunctionTemplateSpecialization() && + "A generic lambda's static-invoker function must be a " + "template specialization"); + const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); + FunctionTemplateDecl *CallOpTemplate = + LambdaCallOp->getDescribedFunctionTemplate(); + void *InsertPos = nullptr; + const FunctionDecl *CorrespondingCallOpSpecialization = + CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos); + assert(CorrespondingCallOpSpecialization); + FuncDecl = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); + } + } + // Set up argument indices. unsigned ParamOffset = 0; SmallVector<PrimType, 8> ParamTypes; + SmallVector<unsigned, 8> ParamOffsets; llvm::DenseMap<unsigned, Function::ParamDescriptor> ParamDescriptors; - // If the return is not a primitive, a pointer to the storage where the value - // is initialized in is passed as the first argument. - QualType Ty = F->getReturnType(); + // If the return is not a primitive, a pointer to the storage where the + // value is initialized in is passed as the first argument. See 'RVO' + // elsewhere in the code. + QualType Ty = FuncDecl->getReturnType(); + bool HasRVO = false; if (!Ty->isVoidType() && !Ctx.classify(Ty)) { + HasRVO = true; ParamTypes.push_back(PT_Ptr); + ParamOffsets.push_back(ParamOffset); ParamOffset += align(primSize(PT_Ptr)); } - // Assign descriptors to all parameters. - // Composite objects are lowered to pointers. - for (const ParmVarDecl *PD : F->parameters()) { - PrimType Ty; - if (llvm::Optional<PrimType> T = Ctx.classify(PD->getType())) { - Ty = *T; - } else { - Ty = PT_Ptr; + // If the function decl is a member decl, the next parameter is + // the 'this' pointer. This parameter is pop()ed from the + // InterpStack when calling the function. + bool HasThisPointer = false; + if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl)) { + if (!IsLambdaStaticInvoker) { + HasThisPointer = MD->isInstance(); + if (MD->isImplicitObjectMemberFunction()) { + ParamTypes.push_back(PT_Ptr); + ParamOffsets.push_back(ParamOffset); + ParamOffset += align(primSize(PT_Ptr)); + } + } + + // Set up lambda capture to closure record field mapping. + if (isLambdaCallOperator(MD)) { + // The parent record needs to be complete, we need to know about all + // the lambda captures. + if (!MD->getParent()->isCompleteDefinition()) + return nullptr; + + const Record *R = P.getOrCreateRecord(MD->getParent()); + llvm::DenseMap<const ValueDecl *, FieldDecl *> LC; + FieldDecl *LTC; + + MD->getParent()->getCaptureFields(LC, LTC); + + for (auto Cap : LC) { + // Static lambdas cannot have any captures. If this one does, + // it has already been diagnosed and we can only ignore it. 
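// Illustrative sketch (standard C++ only; hypothetical names): a lambda's
// captures behave like the fields of a compiler-generated closure class,
// which is why the code above can map each capture to a record field and a
// byte offset.
#include <cassert>

struct Closure {                // rough stand-in for the closure type of
  int N;                        //   [n](int x) { return n + x; }
  int operator()(int X) const { return N + X; }
};

int main() {
  int N = 1;
  auto L = [N](int X) { return N + X; };
  assert(L(2) == Closure{N}(2)); // same behaviour, capture stored as a field
}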
+ if (MD->isStatic()) + return nullptr; + + unsigned Offset = R->getField(Cap.second)->Offset; + this->LambdaCaptures[Cap.first] = { + Offset, Cap.second->getType()->isReferenceType()}; + } + if (LTC) { + QualType CaptureType = R->getField(LTC)->Decl->getType(); + this->LambdaThisCapture = {R->getField(LTC)->Offset, + CaptureType->isReferenceType() || + CaptureType->isPointerType()}; + } } + } - Descriptor *Desc = P.createDescriptor(PD, Ty); - ParamDescriptors.insert({ParamOffset, {Ty, Desc}}); - Params.insert({PD, ParamOffset}); - ParamOffset += align(primSize(Ty)); - ParamTypes.push_back(Ty); + // Assign descriptors to all parameters. + // Composite objects are lowered to pointers. + for (const ParmVarDecl *PD : FuncDecl->parameters()) { + std::optional<PrimType> T = Ctx.classify(PD->getType()); + PrimType PT = T.value_or(PT_Ptr); + Descriptor *Desc = P.createDescriptor(PD, PT); + ParamDescriptors.insert({ParamOffset, {PT, Desc}}); + Params.insert({PD, {ParamOffset, T != std::nullopt}}); + ParamOffsets.push_back(ParamOffset); + ParamOffset += align(primSize(PT)); + ParamTypes.push_back(PT); } // Create a handle over the emitted code. - Function *Func = P.createFunction(F, ParamOffset, std::move(ParamTypes), - std::move(ParamDescriptors)); - // Compile the function body. - if (!F->isConstexpr() || !visitFunc(F)) { - // Return a dummy function if compilation failed. - if (BailLocation) - return llvm::make_error<ByteCodeGenError>(*BailLocation); - else - return Func; - } else { - // Create scopes from descriptors. - llvm::SmallVector<Scope, 2> Scopes; - for (auto &DS : Descriptors) { - Scopes.emplace_back(std::move(DS)); - } + Function *Func = P.getFunction(FuncDecl); + if (!Func) { + bool IsUnevaluatedBuiltin = false; + if (unsigned BI = FuncDecl->getBuiltinID()) + IsUnevaluatedBuiltin = isUnevaluatedBuiltin(BI); + + Func = + P.createFunction(FuncDecl, ParamOffset, std::move(ParamTypes), + std::move(ParamDescriptors), std::move(ParamOffsets), + HasThisPointer, HasRVO, IsUnevaluatedBuiltin); + } + + assert(Func); + // For not-yet-defined functions, we only create a Function instance and + // compile their body later. + if (!FuncDecl->isDefined() || + (FuncDecl->willHaveBody() && !FuncDecl->hasBody())) { + Func->setDefined(false); + return Func; + } - // Set the function's code. - Func->setCode(NextLocalOffset, std::move(Code), std::move(SrcMap), - std::move(Scopes)); + Func->setDefined(true); + + // Lambda static invokers are a special case that we emit custom code for. + bool IsEligibleForCompilation = false; + if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl)) + IsEligibleForCompilation = MD->isLambdaStaticInvoker(); + if (!IsEligibleForCompilation) + IsEligibleForCompilation = + FuncDecl->isConstexpr() || FuncDecl->hasAttr<MSConstexprAttr>(); + + // Compile the function body. + if (!IsEligibleForCompilation || !visitFunc(FuncDecl)) { + Func->setIsFullyCompiled(true); return Func; } + + // Create scopes from descriptors. + llvm::SmallVector<Scope, 2> Scopes; + for (auto &DS : Descriptors) { + Scopes.emplace_back(std::move(DS)); + } + + // Set the function's code. 
+ Func->setCode(NextLocalOffset, std::move(Code), std::move(SrcMap), + std::move(Scopes), FuncDecl->hasBody()); + Func->setIsFullyCompiled(true); + return Func; } Scope::Local ByteCodeEmitter::createLocal(Descriptor *D) { @@ -87,15 +204,17 @@ Scope::Local ByteCodeEmitter::createLocal(Descriptor *D) { void ByteCodeEmitter::emitLabel(LabelTy Label) { const size_t Target = Code.size(); LabelOffsets.insert({Label, Target}); - auto It = LabelRelocs.find(Label); - if (It != LabelRelocs.end()) { + + if (auto It = LabelRelocs.find(Label); + It != LabelRelocs.end()) { for (unsigned Reloc : It->second) { using namespace llvm::support; - /// Rewrite the operand of all jumps to this label. - void *Location = Code.data() + Reloc - sizeof(int32_t); + // Rewrite the operand of all jumps to this label. + void *Location = Code.data() + Reloc - align(sizeof(int32_t)); + assert(aligned(Location)); const int32_t Offset = Target - static_cast<int64_t>(Reloc); - endian::write<int32_t, endianness::native, 1>(Location, Offset); + endian::write<int32_t, llvm::endianness::native>(Location, Offset); } LabelRelocs.erase(It); } @@ -103,49 +222,101 @@ void ByteCodeEmitter::emitLabel(LabelTy Label) { int32_t ByteCodeEmitter::getOffset(LabelTy Label) { // Compute the PC offset which the jump is relative to. - const int64_t Position = Code.size() + sizeof(Opcode) + sizeof(int32_t); + const int64_t Position = + Code.size() + align(sizeof(Opcode)) + align(sizeof(int32_t)); + assert(aligned(Position)); // If target is known, compute jump offset. - auto It = LabelOffsets.find(Label); - if (It != LabelOffsets.end()) { + if (auto It = LabelOffsets.find(Label); + It != LabelOffsets.end()) return It->second - Position; - } // Otherwise, record relocation and return dummy offset. LabelRelocs[Label].push_back(Position); return 0ull; } -bool ByteCodeEmitter::bail(const SourceLocation &Loc) { - if (!BailLocation) - BailLocation = Loc; - return false; +/// Helper to write bytecode and bail out if 32-bit offsets become invalid. +/// Pointers will be automatically marshalled as 32-bit IDs. +template <typename T> +static void emit(Program &P, std::vector<std::byte> &Code, const T &Val, + bool &Success) { + size_t Size; + + if constexpr (std::is_pointer_v<T>) + Size = sizeof(uint32_t); + else + Size = sizeof(T); + + if (Code.size() + Size > std::numeric_limits<unsigned>::max()) { + Success = false; + return; + } + + // Access must be aligned! + size_t ValPos = align(Code.size()); + Size = align(Size); + assert(aligned(ValPos + Size)); + Code.resize(ValPos + Size); + + if constexpr (!std::is_pointer_v<T>) { + new (Code.data() + ValPos) T(Val); + } else { + uint32_t ID = P.getOrCreateNativePointer(Val); + new (Code.data() + ValPos) uint32_t(ID); + } +} + +/// Emits a serializable value. These usually (potentially) contain +/// heap-allocated memory and aren't trivially copyable. +template <typename T> +static void emitSerialized(std::vector<std::byte> &Code, const T &Val, + bool &Success) { + size_t Size = Val.bytesToSerialize(); + + if (Code.size() + Size > std::numeric_limits<unsigned>::max()) { + Success = false; + return; + } + + // Access must be aligned! 
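// Illustrative sketch (simplified; helper names here are hypothetical):
// operands are appended to the code stream at aligned offsets so the
// interpreter can read them back with aligned loads. The real emit()/align()
// above additionally caps the total size at 32 bits and marshals pointers as
// 32-bit IDs.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr std::size_t alignUp(std::size_t Offset, std::size_t A) {
  return (Offset + A - 1) & ~(A - 1); // A is a power of two
}

template <typename T>
std::size_t append(std::vector<std::byte> &Code, const T &Val) {
  std::size_t Pos = alignUp(Code.size(), alignof(T));
  Code.resize(Pos + sizeof(T));
  std::memcpy(Code.data() + Pos, &Val, sizeof(T)); // trivially copyable only
  return Pos;
}

int main() {
  std::vector<std::byte> Code;
  append(Code, std::uint8_t{7});
  std::size_t Pos = append(Code, std::uint64_t{42});
  assert(Pos % alignof(std::uint64_t) == 0); // operand starts aligned
}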
+ size_t ValPos = align(Code.size()); + Size = align(Size); + assert(aligned(ValPos + Size)); + Code.resize(ValPos + Size); + + Val.serialize(Code.data() + ValPos); +} + +template <> +void emit(Program &P, std::vector<std::byte> &Code, const Floating &Val, + bool &Success) { + emitSerialized(Code, Val, Success); +} + +template <> +void emit(Program &P, std::vector<std::byte> &Code, + const IntegralAP<false> &Val, bool &Success) { + emitSerialized(Code, Val, Success); +} + +template <> +void emit(Program &P, std::vector<std::byte> &Code, const IntegralAP<true> &Val, + bool &Success) { + emitSerialized(Code, Val, Success); } template <typename... Tys> bool ByteCodeEmitter::emitOp(Opcode Op, const Tys &... Args, const SourceInfo &SI) { bool Success = true; - /// Helper to write bytecode and bail out if 32-bit offsets become invalid. - auto emit = [this, &Success](const char *Data, size_t Size) { - if (Code.size() + Size > std::numeric_limits<unsigned>::max()) { - Success = false; - return; - } - Code.insert(Code.end(), Data, Data + Size); - }; - - /// The opcode is followed by arguments. The source info is - /// attached to the address after the opcode. - emit(reinterpret_cast<const char *>(&Op), sizeof(Opcode)); + // The opcode is followed by arguments. The source info is + // attached to the address after the opcode. + emit(P, Code, Op, Success); if (SI) SrcMap.emplace_back(Code.size(), SI); - /// The initializer list forces the expression to be evaluated - /// for each argument in the variadic template, in order. - (void)std::initializer_list<int>{ - (emit(reinterpret_cast<const char *>(&Args), sizeof(Args)), 0)...}; - + (..., emit(P, Code, Args, Success)); return Success; } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h index 03452a350c96..a19a25c2f9e8 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h @@ -1,4 +1,4 @@ -//===--- ByteCodeEmitter.h - Instruction emitter for the VM ---------*- C++ -*-===// +//===--- ByteCodeEmitter.h - Instruction emitter for the VM -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -13,19 +13,13 @@ #ifndef LLVM_CLANG_AST_INTERP_LINKEMITTER_H #define LLVM_CLANG_AST_INTERP_LINKEMITTER_H -#include "ByteCodeGenError.h" #include "Context.h" -#include "InterpStack.h" -#include "InterpState.h" #include "PrimType.h" #include "Program.h" #include "Source.h" -#include "llvm/Support/Error.h" namespace clang { namespace interp { -class Context; -class SourceInfo; enum Opcode : uint32_t; /// An emitter which links the program to bytecode for later use. @@ -37,7 +31,7 @@ protected: public: /// Compiles the function into the module. - llvm::Expected<Function *> compileFunc(const FunctionDecl *F); + Function *compileFunc(const FunctionDecl *FuncDecl); protected: ByteCodeEmitter(Context &Ctx, Program &P) : Ctx(Ctx), P(P) {} @@ -52,12 +46,7 @@ protected: /// Methods implemented by the compiler. virtual bool visitFunc(const FunctionDecl *E) = 0; virtual bool visitExpr(const Expr *E) = 0; - virtual bool visitDecl(const VarDecl *E) = 0; - - /// Bails out if a given node cannot be compiled. 
- bool bail(const Stmt *S) { return bail(S->getBeginLoc()); } - bool bail(const Decl *D) { return bail(D->getBeginLoc()); } - bool bail(const SourceLocation &Loc); + virtual bool visitDeclAndReturn(const VarDecl *E, bool ConstantContext) = 0; /// Emits jumps. bool jumpTrue(const LabelTy &Label); @@ -65,11 +54,18 @@ protected: bool jump(const LabelTy &Label); bool fallthrough(const LabelTy &Label); + /// We're always emitting bytecode. + bool isActive() const { return true; } + /// Callback for local registration. Local createLocal(Descriptor *D); /// Parameter indices. - llvm::DenseMap<const ParmVarDecl *, unsigned> Params; + llvm::DenseMap<const ParmVarDecl *, ParamOffset> Params; + /// Lambda captures. + llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures; + /// Offset of the This parameter in a lambda record. + ParamOffset LambdaThisCapture{0, false}; /// Local descriptors. llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors; @@ -82,14 +78,12 @@ private: LabelTy NextLabel = 0; /// Offset of the next local variable. unsigned NextLocalOffset = 0; - /// Location of a failure. - llvm::Optional<SourceLocation> BailLocation; /// Label information for linker. llvm::DenseMap<LabelTy, unsigned> LabelOffsets; /// Location of label relocations. llvm::DenseMap<LabelTy, llvm::SmallVector<unsigned, 5>> LabelRelocs; /// Program code. - std::vector<char> Code; + std::vector<std::byte> Code; /// Opcode to expression mapping. SourceMap SrcMap; diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp deleted file mode 100644 index 5c8cb4274260..000000000000 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp +++ /dev/null @@ -1,580 +0,0 @@ -//===--- ByteCodeExprGen.cpp - Code generator for expressions ---*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "ByteCodeExprGen.h" -#include "ByteCodeEmitter.h" -#include "ByteCodeGenError.h" -#include "Context.h" -#include "Function.h" -#include "PrimType.h" -#include "Program.h" -#include "State.h" - -using namespace clang; -using namespace clang::interp; - -using APSInt = llvm::APSInt; -template <typename T> using Expected = llvm::Expected<T>; -template <typename T> using Optional = llvm::Optional<T>; - -namespace clang { -namespace interp { - -/// Scope used to handle temporaries in toplevel variable declarations. -template <class Emitter> class DeclScope final : public LocalScope<Emitter> { -public: - DeclScope(ByteCodeExprGen<Emitter> *Ctx, const VarDecl *VD) - : LocalScope<Emitter>(Ctx), Scope(Ctx->P, VD) {} - - void addExtended(const Scope::Local &Local) override { - return this->addLocal(Local); - } - -private: - Program::DeclScope Scope; -}; - -/// Scope used to handle initialization methods. -template <class Emitter> class OptionScope { -public: - using InitFnRef = typename ByteCodeExprGen<Emitter>::InitFnRef; - using ChainedInitFnRef = std::function<bool(InitFnRef)>; - - /// Root constructor, compiling or discarding primitives. 
- OptionScope(ByteCodeExprGen<Emitter> *Ctx, bool NewDiscardResult) - : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult), - OldInitFn(std::move(Ctx->InitFn)) { - Ctx->DiscardResult = NewDiscardResult; - Ctx->InitFn = llvm::Optional<InitFnRef>{}; - } - - /// Root constructor, setting up compilation state. - OptionScope(ByteCodeExprGen<Emitter> *Ctx, InitFnRef NewInitFn) - : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult), - OldInitFn(std::move(Ctx->InitFn)) { - Ctx->DiscardResult = true; - Ctx->InitFn = NewInitFn; - } - - /// Extends the chain of initialisation pointers. - OptionScope(ByteCodeExprGen<Emitter> *Ctx, ChainedInitFnRef NewInitFn) - : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult), - OldInitFn(std::move(Ctx->InitFn)) { - assert(OldInitFn && "missing initializer"); - Ctx->InitFn = [this, NewInitFn] { return NewInitFn(*OldInitFn); }; - } - - ~OptionScope() { - Ctx->DiscardResult = OldDiscardResult; - Ctx->InitFn = std::move(OldInitFn); - } - -private: - /// Parent context. - ByteCodeExprGen<Emitter> *Ctx; - /// Old discard flag to restore. - bool OldDiscardResult; - /// Old pointer emitter to restore. - llvm::Optional<InitFnRef> OldInitFn; -}; - -} // namespace interp -} // namespace clang - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) { - auto *SubExpr = CE->getSubExpr(); - switch (CE->getCastKind()) { - - case CK_LValueToRValue: { - return dereference( - CE->getSubExpr(), DerefKind::Read, - [](PrimType) { - // Value loaded - nothing to do here. - return true; - }, - [this, CE](PrimType T) { - // Pointer on stack - dereference it. - if (!this->emitLoadPop(T, CE)) - return false; - return DiscardResult ? this->emitPop(T, CE) : true; - }); - } - - case CK_ArrayToPointerDecay: - case CK_AtomicToNonAtomic: - case CK_ConstructorConversion: - case CK_FunctionToPointerDecay: - case CK_NonAtomicToAtomic: - case CK_NoOp: - case CK_UserDefinedConversion: - return this->Visit(SubExpr); - - case CK_ToVoid: - return discard(SubExpr); - - default: { - // TODO: implement other casts. - return this->bail(CE); - } - } -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::VisitIntegerLiteral(const IntegerLiteral *LE) { - if (DiscardResult) - return true; - - auto Val = LE->getValue(); - QualType LitTy = LE->getType(); - if (Optional<PrimType> T = classify(LitTy)) - return emitConst(*T, getIntWidth(LitTy), LE->getValue(), LE); - return this->bail(LE); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::VisitParenExpr(const ParenExpr *PE) { - return this->Visit(PE->getSubExpr()); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) { - const Expr *LHS = BO->getLHS(); - const Expr *RHS = BO->getRHS(); - - // Deal with operations which have composite or void types. - switch (BO->getOpcode()) { - case BO_Comma: - if (!discard(LHS)) - return false; - if (!this->Visit(RHS)) - return false; - return true; - default: - break; - } - - // Typecheck the args. - Optional<PrimType> LT = classify(LHS->getType()); - Optional<PrimType> RT = classify(RHS->getType()); - if (!LT || !RT) { - return this->bail(BO); - } - - if (Optional<PrimType> T = classify(BO->getType())) { - if (!visit(LHS)) - return false; - if (!visit(RHS)) - return false; - - auto Discard = [this, T, BO](bool Result) { - if (!Result) - return false; - return DiscardResult ? 
this->emitPop(*T, BO) : true; - }; - - switch (BO->getOpcode()) { - case BO_EQ: - return Discard(this->emitEQ(*LT, BO)); - case BO_NE: - return Discard(this->emitNE(*LT, BO)); - case BO_LT: - return Discard(this->emitLT(*LT, BO)); - case BO_LE: - return Discard(this->emitLE(*LT, BO)); - case BO_GT: - return Discard(this->emitGT(*LT, BO)); - case BO_GE: - return Discard(this->emitGE(*LT, BO)); - case BO_Sub: - return Discard(this->emitSub(*T, BO)); - case BO_Add: - return Discard(this->emitAdd(*T, BO)); - case BO_Mul: - return Discard(this->emitMul(*T, BO)); - default: - return this->bail(BO); - } - } - - return this->bail(BO); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::discard(const Expr *E) { - OptionScope<Emitter> Scope(this, /*discardResult=*/true); - return this->Visit(E); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::visit(const Expr *E) { - OptionScope<Emitter> Scope(this, /*discardResult=*/false); - return this->Visit(E); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::visitBool(const Expr *E) { - if (Optional<PrimType> T = classify(E->getType())) { - return visit(E); - } else { - return this->bail(E); - } -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, const Expr *E) { - switch (T) { - case PT_Bool: - return this->emitZeroBool(E); - case PT_Sint8: - return this->emitZeroSint8(E); - case PT_Uint8: - return this->emitZeroUint8(E); - case PT_Sint16: - return this->emitZeroSint16(E); - case PT_Uint16: - return this->emitZeroUint16(E); - case PT_Sint32: - return this->emitZeroSint32(E); - case PT_Uint32: - return this->emitZeroUint32(E); - case PT_Sint64: - return this->emitZeroSint64(E); - case PT_Uint64: - return this->emitZeroUint64(E); - case PT_Ptr: - return this->emitNullPtr(E); - } - llvm_unreachable("unknown primitive type"); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::dereference( - const Expr *LV, DerefKind AK, llvm::function_ref<bool(PrimType)> Direct, - llvm::function_ref<bool(PrimType)> Indirect) { - if (Optional<PrimType> T = classify(LV->getType())) { - if (!LV->refersToBitField()) { - // Only primitive, non bit-field types can be dereferenced directly. - if (auto *DE = dyn_cast<DeclRefExpr>(LV)) { - if (!DE->getDecl()->getType()->isReferenceType()) { - if (auto *PD = dyn_cast<ParmVarDecl>(DE->getDecl())) - return dereferenceParam(LV, *T, PD, AK, Direct, Indirect); - if (auto *VD = dyn_cast<VarDecl>(DE->getDecl())) - return dereferenceVar(LV, *T, VD, AK, Direct, Indirect); - } - } - } - - if (!visit(LV)) - return false; - return Indirect(*T); - } - - return false; -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::dereferenceParam( - const Expr *LV, PrimType T, const ParmVarDecl *PD, DerefKind AK, - llvm::function_ref<bool(PrimType)> Direct, - llvm::function_ref<bool(PrimType)> Indirect) { - auto It = this->Params.find(PD); - if (It != this->Params.end()) { - unsigned Idx = It->second; - switch (AK) { - case DerefKind::Read: - return DiscardResult ? true : this->emitGetParam(T, Idx, LV); - - case DerefKind::Write: - if (!Direct(T)) - return false; - if (!this->emitSetParam(T, Idx, LV)) - return false; - return DiscardResult ? true : this->emitGetPtrParam(Idx, LV); - - case DerefKind::ReadWrite: - if (!this->emitGetParam(T, Idx, LV)) - return false; - if (!Direct(T)) - return false; - if (!this->emitSetParam(T, Idx, LV)) - return false; - return DiscardResult ? 
true : this->emitGetPtrParam(Idx, LV); - } - return true; - } - - // If the param is a pointer, we can dereference a dummy value. - if (!DiscardResult && T == PT_Ptr && AK == DerefKind::Read) { - if (auto Idx = P.getOrCreateDummy(PD)) - return this->emitGetPtrGlobal(*Idx, PD); - return false; - } - - // Value cannot be produced - try to emit pointer and do stuff with it. - return visit(LV) && Indirect(T); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::dereferenceVar( - const Expr *LV, PrimType T, const VarDecl *VD, DerefKind AK, - llvm::function_ref<bool(PrimType)> Direct, - llvm::function_ref<bool(PrimType)> Indirect) { - auto It = Locals.find(VD); - if (It != Locals.end()) { - const auto &L = It->second; - switch (AK) { - case DerefKind::Read: - if (!this->emitGetLocal(T, L.Offset, LV)) - return false; - return DiscardResult ? this->emitPop(T, LV) : true; - - case DerefKind::Write: - if (!Direct(T)) - return false; - if (!this->emitSetLocal(T, L.Offset, LV)) - return false; - return DiscardResult ? true : this->emitGetPtrLocal(L.Offset, LV); - - case DerefKind::ReadWrite: - if (!this->emitGetLocal(T, L.Offset, LV)) - return false; - if (!Direct(T)) - return false; - if (!this->emitSetLocal(T, L.Offset, LV)) - return false; - return DiscardResult ? true : this->emitGetPtrLocal(L.Offset, LV); - } - } else if (auto Idx = getGlobalIdx(VD)) { - switch (AK) { - case DerefKind::Read: - if (!this->emitGetGlobal(T, *Idx, LV)) - return false; - return DiscardResult ? this->emitPop(T, LV) : true; - - case DerefKind::Write: - if (!Direct(T)) - return false; - if (!this->emitSetGlobal(T, *Idx, LV)) - return false; - return DiscardResult ? true : this->emitGetPtrGlobal(*Idx, LV); - - case DerefKind::ReadWrite: - if (!this->emitGetGlobal(T, *Idx, LV)) - return false; - if (!Direct(T)) - return false; - if (!this->emitSetGlobal(T, *Idx, LV)) - return false; - return DiscardResult ? true : this->emitGetPtrGlobal(*Idx, LV); - } - } - - // If the declaration is a constant value, emit it here even - // though the declaration was not evaluated in the current scope. - // The access mode can only be read in this case. - if (!DiscardResult && AK == DerefKind::Read) { - if (VD->hasLocalStorage() && VD->hasInit() && !VD->isConstexpr()) { - QualType VT = VD->getType(); - if (VT.isConstQualified() && VT->isFundamentalType()) - return this->Visit(VD->getInit()); - } - } - - // Value cannot be produced - try to emit pointer. 
- return visit(LV) && Indirect(T); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::emitConst(PrimType T, unsigned NumBits, - const APInt &Value, const Expr *E) { - switch (T) { - case PT_Sint8: - return this->emitConstSint8(Value.getSExtValue(), E); - case PT_Uint8: - return this->emitConstUint8(Value.getZExtValue(), E); - case PT_Sint16: - return this->emitConstSint16(Value.getSExtValue(), E); - case PT_Uint16: - return this->emitConstUint16(Value.getZExtValue(), E); - case PT_Sint32: - return this->emitConstSint32(Value.getSExtValue(), E); - case PT_Uint32: - return this->emitConstUint32(Value.getZExtValue(), E); - case PT_Sint64: - return this->emitConstSint64(Value.getSExtValue(), E); - case PT_Uint64: - return this->emitConstUint64(Value.getZExtValue(), E); - case PT_Bool: - return this->emitConstBool(Value.getBoolValue(), E); - case PT_Ptr: - llvm_unreachable("Invalid integral type"); - break; - } - llvm_unreachable("unknown primitive type"); -} - -template <class Emitter> -unsigned ByteCodeExprGen<Emitter>::allocateLocalPrimitive(DeclTy &&Src, - PrimType Ty, - bool IsConst, - bool IsExtended) { - Descriptor *D = P.createDescriptor(Src, Ty, IsConst, Src.is<const Expr *>()); - Scope::Local Local = this->createLocal(D); - if (auto *VD = dyn_cast_or_null<ValueDecl>(Src.dyn_cast<const Decl *>())) - Locals.insert({VD, Local}); - VarScope->add(Local, IsExtended); - return Local.Offset; -} - -template <class Emitter> -llvm::Optional<unsigned> -ByteCodeExprGen<Emitter>::allocateLocal(DeclTy &&Src, bool IsExtended) { - QualType Ty; - - const ValueDecl *Key = nullptr; - bool IsTemporary = false; - if (auto *VD = dyn_cast_or_null<ValueDecl>(Src.dyn_cast<const Decl *>())) { - Key = VD; - Ty = VD->getType(); - } - if (auto *E = Src.dyn_cast<const Expr *>()) { - IsTemporary = true; - Ty = E->getType(); - } - - Descriptor *D = P.createDescriptor(Src, Ty.getTypePtr(), - Ty.isConstQualified(), IsTemporary); - if (!D) - return {}; - - Scope::Local Local = this->createLocal(D); - if (Key) - Locals.insert({Key, Local}); - VarScope->add(Local, IsExtended); - return Local.Offset; -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::visitInitializer( - const Expr *Init, InitFnRef InitFn) { - OptionScope<Emitter> Scope(this, InitFn); - return this->Visit(Init); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::getPtrVarDecl(const VarDecl *VD, const Expr *E) { - // Generate a pointer to the local, loading refs. - if (Optional<unsigned> Idx = getGlobalIdx(VD)) { - if (VD->getType()->isReferenceType()) - return this->emitGetGlobalPtr(*Idx, E); - else - return this->emitGetPtrGlobal(*Idx, E); - } - return this->bail(VD); -} - -template <class Emitter> -llvm::Optional<unsigned> -ByteCodeExprGen<Emitter>::getGlobalIdx(const VarDecl *VD) { - if (VD->isConstexpr()) { - // Constexpr decl - it must have already been defined. - return P.getGlobal(VD); - } - if (!VD->hasLocalStorage()) { - // Not constexpr, but a global var - can have pointer taken. 
- Program::DeclScope Scope(P, VD); - return P.getOrCreateGlobal(VD); - } - return {}; -} - -template <class Emitter> -const RecordType *ByteCodeExprGen<Emitter>::getRecordTy(QualType Ty) { - if (auto *PT = dyn_cast<PointerType>(Ty)) - return PT->getPointeeType()->getAs<RecordType>(); - else - return Ty->getAs<RecordType>(); -} - -template <class Emitter> -Record *ByteCodeExprGen<Emitter>::getRecord(QualType Ty) { - if (auto *RecordTy = getRecordTy(Ty)) { - return getRecord(RecordTy->getDecl()); - } - return nullptr; -} - -template <class Emitter> -Record *ByteCodeExprGen<Emitter>::getRecord(const RecordDecl *RD) { - return P.getOrCreateRecord(RD); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::visitExpr(const Expr *Exp) { - ExprScope<Emitter> RootScope(this); - if (!visit(Exp)) - return false; - - if (Optional<PrimType> T = classify(Exp)) - return this->emitRet(*T, Exp); - else - return this->emitRetValue(Exp); -} - -template <class Emitter> -bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) { - const Expr *Init = VD->getInit(); - - if (Optional<unsigned> I = P.createGlobal(VD)) { - if (Optional<PrimType> T = classify(VD->getType())) { - { - // Primitive declarations - compute the value and set it. - DeclScope<Emitter> LocalScope(this, VD); - if (!visit(Init)) - return false; - } - - // If the declaration is global, save the value for later use. - if (!this->emitDup(*T, VD)) - return false; - if (!this->emitInitGlobal(*T, *I, VD)) - return false; - return this->emitRet(*T, VD); - } else { - { - // Composite declarations - allocate storage and initialize it. - DeclScope<Emitter> LocalScope(this, VD); - if (!visitGlobalInitializer(Init, *I)) - return false; - } - - // Return a pointer to the global. - if (!this->emitGetPtrGlobal(*I, VD)) - return false; - return this->emitRetValue(VD); - } - } - - return this->bail(VD); -} - -template <class Emitter> -void ByteCodeExprGen<Emitter>::emitCleanup() { - for (VariableScope<Emitter> *C = VarScope; C; C = C->getParent()) - C->emitDestruction(); -} - -namespace clang { -namespace interp { - -template class ByteCodeExprGen<ByteCodeEmitter>; -template class ByteCodeExprGen<EvalEmitter>; - -} // namespace interp -} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h deleted file mode 100644 index 716f28551e58..000000000000 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h +++ /dev/null @@ -1,332 +0,0 @@ -//===--- ByteCodeExprGen.h - Code generator for expressions -----*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// Defines the constexpr bytecode compiler. 
-// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_AST_INTERP_BYTECODEEXPRGEN_H -#define LLVM_CLANG_AST_INTERP_BYTECODEEXPRGEN_H - -#include "ByteCodeEmitter.h" -#include "EvalEmitter.h" -#include "Pointer.h" -#include "PrimType.h" -#include "Record.h" -#include "clang/AST/Decl.h" -#include "clang/AST/Expr.h" -#include "clang/AST/StmtVisitor.h" -#include "clang/Basic/TargetInfo.h" -#include "llvm/ADT/Optional.h" - -namespace clang { -class QualType; - -namespace interp { -class Function; -class State; - -template <class Emitter> class LocalScope; -template <class Emitter> class RecordScope; -template <class Emitter> class VariableScope; -template <class Emitter> class DeclScope; -template <class Emitter> class OptionScope; - -/// Compilation context for expressions. -template <class Emitter> -class ByteCodeExprGen : public ConstStmtVisitor<ByteCodeExprGen<Emitter>, bool>, - public Emitter { -protected: - // Emitters for opcodes of various arities. - using NullaryFn = bool (ByteCodeExprGen::*)(const SourceInfo &); - using UnaryFn = bool (ByteCodeExprGen::*)(PrimType, const SourceInfo &); - using BinaryFn = bool (ByteCodeExprGen::*)(PrimType, PrimType, - const SourceInfo &); - - // Aliases for types defined in the emitter. - using LabelTy = typename Emitter::LabelTy; - using AddrTy = typename Emitter::AddrTy; - - // Reference to a function generating the pointer of an initialized object.s - using InitFnRef = std::function<bool()>; - - /// Current compilation context. - Context &Ctx; - /// Program to link to. - Program &P; - -public: - /// Initializes the compiler and the backend emitter. - template <typename... Tys> - ByteCodeExprGen(Context &Ctx, Program &P, Tys &&... Args) - : Emitter(Ctx, P, Args...), Ctx(Ctx), P(P) {} - - // Expression visitors - result returned on stack. - bool VisitCastExpr(const CastExpr *E); - bool VisitIntegerLiteral(const IntegerLiteral *E); - bool VisitParenExpr(const ParenExpr *E); - bool VisitBinaryOperator(const BinaryOperator *E); - -protected: - bool visitExpr(const Expr *E) override; - bool visitDecl(const VarDecl *VD) override; - -protected: - /// Emits scope cleanup instructions. - void emitCleanup(); - - /// Returns a record type from a record or pointer type. - const RecordType *getRecordTy(QualType Ty); - - /// Returns a record from a record or pointer type. - Record *getRecord(QualType Ty); - Record *getRecord(const RecordDecl *RD); - - /// Returns the size int bits of an integer. - unsigned getIntWidth(QualType Ty) { - auto &ASTContext = Ctx.getASTContext(); - return ASTContext.getIntWidth(Ty); - } - - /// Returns the value of CHAR_BIT. - unsigned getCharBit() const { - auto &ASTContext = Ctx.getASTContext(); - return ASTContext.getTargetInfo().getCharWidth(); - } - - /// Classifies a type. - llvm::Optional<PrimType> classify(const Expr *E) const { - return E->isGLValue() ? PT_Ptr : classify(E->getType()); - } - llvm::Optional<PrimType> classify(QualType Ty) const { - return Ctx.classify(Ty); - } - - /// Checks if a pointer needs adjustment. - bool needsAdjust(QualType Ty) const { - return true; - } - - /// Classifies a known primitive type - PrimType classifyPrim(QualType Ty) const { - if (auto T = classify(Ty)) { - return *T; - } - llvm_unreachable("not a primitive type"); - } - - /// Evaluates an expression for side effects and discards the result. - bool discard(const Expr *E); - /// Evaluates an expression and places result on stack. 
- bool visit(const Expr *E); - /// Compiles an initializer for a local. - bool visitInitializer(const Expr *E, InitFnRef GenPtr); - - /// Visits an expression and converts it to a boolean. - bool visitBool(const Expr *E); - - /// Visits an initializer for a local. - bool visitLocalInitializer(const Expr *Init, unsigned I) { - return visitInitializer(Init, [this, I, Init] { - return this->emitGetPtrLocal(I, Init); - }); - } - - /// Visits an initializer for a global. - bool visitGlobalInitializer(const Expr *Init, unsigned I) { - return visitInitializer(Init, [this, I, Init] { - return this->emitGetPtrGlobal(I, Init); - }); - } - - /// Visits a delegated initializer. - bool visitThisInitializer(const Expr *I) { - return visitInitializer(I, [this, I] { return this->emitThis(I); }); - } - - /// Creates a local primitive value. - unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsMutable, - bool IsExtended = false); - - /// Allocates a space storing a local given its type. - llvm::Optional<unsigned> allocateLocal(DeclTy &&Decl, - bool IsExtended = false); - -private: - friend class VariableScope<Emitter>; - friend class LocalScope<Emitter>; - friend class RecordScope<Emitter>; - friend class DeclScope<Emitter>; - friend class OptionScope<Emitter>; - - /// Emits a zero initializer. - bool visitZeroInitializer(PrimType T, const Expr *E); - - enum class DerefKind { - /// Value is read and pushed to stack. - Read, - /// Direct method generates a value which is written. Returns pointer. - Write, - /// Direct method receives the value, pushes mutated value. Returns pointer. - ReadWrite, - }; - - /// Method to directly load a value. If the value can be fetched directly, - /// the direct handler is called. Otherwise, a pointer is left on the stack - /// and the indirect handler is expected to operate on that. - bool dereference(const Expr *LV, DerefKind AK, - llvm::function_ref<bool(PrimType)> Direct, - llvm::function_ref<bool(PrimType)> Indirect); - bool dereferenceParam(const Expr *LV, PrimType T, const ParmVarDecl *PD, - DerefKind AK, - llvm::function_ref<bool(PrimType)> Direct, - llvm::function_ref<bool(PrimType)> Indirect); - bool dereferenceVar(const Expr *LV, PrimType T, const VarDecl *PD, - DerefKind AK, llvm::function_ref<bool(PrimType)> Direct, - llvm::function_ref<bool(PrimType)> Indirect); - - /// Emits an APInt constant. - bool emitConst(PrimType T, unsigned NumBits, const llvm::APInt &Value, - const Expr *E); - - /// Emits an integer constant. - template <typename T> bool emitConst(const Expr *E, T Value) { - QualType Ty = E->getType(); - unsigned NumBits = getIntWidth(Ty); - APInt WrappedValue(NumBits, Value, std::is_signed<T>::value); - return emitConst(*Ctx.classify(Ty), NumBits, WrappedValue, E); - } - - /// Returns a pointer to a variable declaration. - bool getPtrVarDecl(const VarDecl *VD, const Expr *E); - - /// Returns the index of a global. - llvm::Optional<unsigned> getGlobalIdx(const VarDecl *VD); - - /// Emits the initialized pointer. - bool emitInitFn() { - assert(InitFn && "missing initializer"); - return (*InitFn)(); - } - -protected: - /// Variable to storage mapping. - llvm::DenseMap<const ValueDecl *, Scope::Local> Locals; - - /// OpaqueValueExpr to location mapping. - llvm::DenseMap<const OpaqueValueExpr *, unsigned> OpaqueExprs; - - /// Current scope. - VariableScope<Emitter> *VarScope = nullptr; - - /// Current argument index. - llvm::Optional<uint64_t> ArrayIndex; - - /// Flag indicating if return value is to be discarded. 
- bool DiscardResult = false; - - /// Expression being initialized. - llvm::Optional<InitFnRef> InitFn = {}; -}; - -extern template class ByteCodeExprGen<ByteCodeEmitter>; -extern template class ByteCodeExprGen<EvalEmitter>; - -/// Scope chain managing the variable lifetimes. -template <class Emitter> class VariableScope { -public: - virtual ~VariableScope() { Ctx->VarScope = this->Parent; } - - void add(const Scope::Local &Local, bool IsExtended) { - if (IsExtended) - this->addExtended(Local); - else - this->addLocal(Local); - } - - virtual void addLocal(const Scope::Local &Local) { - if (this->Parent) - this->Parent->addLocal(Local); - } - - virtual void addExtended(const Scope::Local &Local) { - if (this->Parent) - this->Parent->addExtended(Local); - } - - virtual void emitDestruction() {} - - VariableScope *getParent() { return Parent; } - -protected: - VariableScope(ByteCodeExprGen<Emitter> *Ctx) - : Ctx(Ctx), Parent(Ctx->VarScope) { - Ctx->VarScope = this; - } - - /// ByteCodeExprGen instance. - ByteCodeExprGen<Emitter> *Ctx; - /// Link to the parent scope. - VariableScope *Parent; -}; - -/// Scope for local variables. -/// -/// When the scope is destroyed, instructions are emitted to tear down -/// all variables declared in this scope. -template <class Emitter> class LocalScope : public VariableScope<Emitter> { -public: - LocalScope(ByteCodeExprGen<Emitter> *Ctx) : VariableScope<Emitter>(Ctx) {} - - ~LocalScope() override { this->emitDestruction(); } - - void addLocal(const Scope::Local &Local) override { - if (!Idx.hasValue()) { - Idx = this->Ctx->Descriptors.size(); - this->Ctx->Descriptors.emplace_back(); - } - - this->Ctx->Descriptors[*Idx].emplace_back(Local); - } - - void emitDestruction() override { - if (!Idx.hasValue()) - return; - this->Ctx->emitDestroy(*Idx, SourceInfo{}); - } - -protected: - /// Index of the scope in the chain. - Optional<unsigned> Idx; -}; - -/// Scope for storage declared in a compound statement. -template <class Emitter> class BlockScope final : public LocalScope<Emitter> { -public: - BlockScope(ByteCodeExprGen<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {} - - void addExtended(const Scope::Local &Local) override { - llvm_unreachable("Cannot create temporaries in full scopes"); - } -}; - -/// Expression scope which tracks potentially lifetime extended -/// temporaries which are hoisted to the parent scope on exit. -template <class Emitter> class ExprScope final : public LocalScope<Emitter> { -public: - ExprScope(ByteCodeExprGen<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {} - - void addExtended(const Scope::Local &Local) override { - this->Parent->addLocal(Local); - } -}; - -} // namespace interp -} // namespace clang - -#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.cpp deleted file mode 100644 index 5fd3d77c3842..000000000000 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.cpp +++ /dev/null @@ -1,14 +0,0 @@ -//===--- ByteCodeGenError.h - Byte code generation error --------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "ByteCodeGenError.h" - -using namespace clang; -using namespace clang::interp; - -char ByteCodeGenError::ID; diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h deleted file mode 100644 index a4fa4917705d..000000000000 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h +++ /dev/null @@ -1,46 +0,0 @@ -//===--- ByteCodeGenError.h - Byte code generation error ----------*- C -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_AST_INTERP_BYTECODEGENERROR_H -#define LLVM_CLANG_AST_INTERP_BYTECODEGENERROR_H - -#include "clang/AST/Decl.h" -#include "clang/AST/Stmt.h" -#include "clang/Basic/SourceLocation.h" -#include "llvm/Support/Error.h" - -namespace clang { -namespace interp { - -/// Error thrown by the compiler. -struct ByteCodeGenError : public llvm::ErrorInfo<ByteCodeGenError> { -public: - ByteCodeGenError(SourceLocation Loc) : Loc(Loc) {} - ByteCodeGenError(const Stmt *S) : ByteCodeGenError(S->getBeginLoc()) {} - ByteCodeGenError(const Decl *D) : ByteCodeGenError(D->getBeginLoc()) {} - - void log(raw_ostream &OS) const override { OS << "unimplemented feature"; } - - const SourceLocation &getLoc() const { return Loc; } - - static char ID; - -private: - // Start of the item where the error occurred. - SourceLocation Loc; - - // Users are not expected to use error_code. - std::error_code convertToErrorCode() const override { - return llvm::inconvertibleErrorCode(); - } -}; - -} // namespace interp -} // namespace clang - -#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp deleted file mode 100644 index 5b47489e65e0..000000000000 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp +++ /dev/null @@ -1,263 +0,0 @@ -//===--- ByteCodeStmtGen.cpp - Code generator for expressions ---*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#include "ByteCodeStmtGen.h" -#include "ByteCodeEmitter.h" -#include "ByteCodeGenError.h" -#include "Context.h" -#include "Function.h" -#include "PrimType.h" -#include "Program.h" -#include "State.h" -#include "clang/Basic/LLVM.h" - -using namespace clang; -using namespace clang::interp; - -namespace clang { -namespace interp { - -/// Scope managing label targets. -template <class Emitter> class LabelScope { -public: - virtual ~LabelScope() { } - -protected: - LabelScope(ByteCodeStmtGen<Emitter> *Ctx) : Ctx(Ctx) {} - /// ByteCodeStmtGen instance. - ByteCodeStmtGen<Emitter> *Ctx; -}; - -/// Sets the context for break/continue statements. 
-template <class Emitter> class LoopScope final : public LabelScope<Emitter> { -public: - using LabelTy = typename ByteCodeStmtGen<Emitter>::LabelTy; - using OptLabelTy = typename ByteCodeStmtGen<Emitter>::OptLabelTy; - - LoopScope(ByteCodeStmtGen<Emitter> *Ctx, LabelTy BreakLabel, - LabelTy ContinueLabel) - : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel), - OldContinueLabel(Ctx->ContinueLabel) { - this->Ctx->BreakLabel = BreakLabel; - this->Ctx->ContinueLabel = ContinueLabel; - } - - ~LoopScope() { - this->Ctx->BreakLabel = OldBreakLabel; - this->Ctx->ContinueLabel = OldContinueLabel; - } - -private: - OptLabelTy OldBreakLabel; - OptLabelTy OldContinueLabel; -}; - -// Sets the context for a switch scope, mapping labels. -template <class Emitter> class SwitchScope final : public LabelScope<Emitter> { -public: - using LabelTy = typename ByteCodeStmtGen<Emitter>::LabelTy; - using OptLabelTy = typename ByteCodeStmtGen<Emitter>::OptLabelTy; - using CaseMap = typename ByteCodeStmtGen<Emitter>::CaseMap; - - SwitchScope(ByteCodeStmtGen<Emitter> *Ctx, CaseMap &&CaseLabels, - LabelTy BreakLabel, OptLabelTy DefaultLabel) - : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel), - OldDefaultLabel(this->Ctx->DefaultLabel), - OldCaseLabels(std::move(this->Ctx->CaseLabels)) { - this->Ctx->BreakLabel = BreakLabel; - this->Ctx->DefaultLabel = DefaultLabel; - this->Ctx->CaseLabels = std::move(CaseLabels); - } - - ~SwitchScope() { - this->Ctx->BreakLabel = OldBreakLabel; - this->Ctx->DefaultLabel = OldDefaultLabel; - this->Ctx->CaseLabels = std::move(OldCaseLabels); - } - -private: - OptLabelTy OldBreakLabel; - OptLabelTy OldDefaultLabel; - CaseMap OldCaseLabels; -}; - -} // namespace interp -} // namespace clang - -template <class Emitter> -bool ByteCodeStmtGen<Emitter>::visitFunc(const FunctionDecl *F) { - // Classify the return type. - ReturnType = this->classify(F->getReturnType()); - - // Set up fields and context if a constructor. - if (auto *MD = dyn_cast<CXXMethodDecl>(F)) - return this->bail(MD); - - if (auto *Body = F->getBody()) - if (!visitStmt(Body)) - return false; - - // Emit a guard return to protect against a code path missing one. - if (F->getReturnType()->isVoidType()) - return this->emitRetVoid(SourceInfo{}); - else - return this->emitNoRet(SourceInfo{}); -} - -template <class Emitter> -bool ByteCodeStmtGen<Emitter>::visitStmt(const Stmt *S) { - switch (S->getStmtClass()) { - case Stmt::CompoundStmtClass: - return visitCompoundStmt(cast<CompoundStmt>(S)); - case Stmt::DeclStmtClass: - return visitDeclStmt(cast<DeclStmt>(S)); - case Stmt::ReturnStmtClass: - return visitReturnStmt(cast<ReturnStmt>(S)); - case Stmt::IfStmtClass: - return visitIfStmt(cast<IfStmt>(S)); - case Stmt::NullStmtClass: - return true; - default: { - if (auto *Exp = dyn_cast<Expr>(S)) - return this->discard(Exp); - return this->bail(S); - } - } -} - -template <class Emitter> -bool ByteCodeStmtGen<Emitter>::visitCompoundStmt( - const CompoundStmt *CompoundStmt) { - BlockScope<Emitter> Scope(this); - for (auto *InnerStmt : CompoundStmt->body()) - if (!visitStmt(InnerStmt)) - return false; - return true; -} - -template <class Emitter> -bool ByteCodeStmtGen<Emitter>::visitDeclStmt(const DeclStmt *DS) { - for (auto *D : DS->decls()) { - // Variable declarator. - if (auto *VD = dyn_cast<VarDecl>(D)) { - if (!visitVarDecl(VD)) - return false; - continue; - } - - // Decomposition declarator. 
- if (auto *DD = dyn_cast<DecompositionDecl>(D)) { - return this->bail(DD); - } - } - - return true; -} - -template <class Emitter> -bool ByteCodeStmtGen<Emitter>::visitReturnStmt(const ReturnStmt *RS) { - if (const Expr *RE = RS->getRetValue()) { - ExprScope<Emitter> RetScope(this); - if (ReturnType) { - // Primitive types are simply returned. - if (!this->visit(RE)) - return false; - this->emitCleanup(); - return this->emitRet(*ReturnType, RS); - } else { - // RVO - construct the value in the return location. - auto ReturnLocation = [this, RE] { return this->emitGetParamPtr(0, RE); }; - if (!this->visitInitializer(RE, ReturnLocation)) - return false; - this->emitCleanup(); - return this->emitRetVoid(RS); - } - } else { - this->emitCleanup(); - if (!this->emitRetVoid(RS)) - return false; - return true; - } -} - -template <class Emitter> -bool ByteCodeStmtGen<Emitter>::visitIfStmt(const IfStmt *IS) { - BlockScope<Emitter> IfScope(this); - if (auto *CondInit = IS->getInit()) - if (!visitStmt(IS->getInit())) - return false; - - if (const DeclStmt *CondDecl = IS->getConditionVariableDeclStmt()) - if (!visitDeclStmt(CondDecl)) - return false; - - if (!this->visitBool(IS->getCond())) - return false; - - if (const Stmt *Else = IS->getElse()) { - LabelTy LabelElse = this->getLabel(); - LabelTy LabelEnd = this->getLabel(); - if (!this->jumpFalse(LabelElse)) - return false; - if (!visitStmt(IS->getThen())) - return false; - if (!this->jump(LabelEnd)) - return false; - this->emitLabel(LabelElse); - if (!visitStmt(Else)) - return false; - this->emitLabel(LabelEnd); - } else { - LabelTy LabelEnd = this->getLabel(); - if (!this->jumpFalse(LabelEnd)) - return false; - if (!visitStmt(IS->getThen())) - return false; - this->emitLabel(LabelEnd); - } - - return true; -} - -template <class Emitter> -bool ByteCodeStmtGen<Emitter>::visitVarDecl(const VarDecl *VD) { - auto DT = VD->getType(); - - if (!VD->hasLocalStorage()) { - // No code generation required. - return true; - } - - // Integers, pointers, primitives. - if (Optional<PrimType> T = this->classify(DT)) { - auto Off = this->allocateLocalPrimitive(VD, *T, DT.isConstQualified()); - // Compile the initialiser in its own scope. - { - ExprScope<Emitter> Scope(this); - if (!this->visit(VD->getInit())) - return false; - } - // Set the value. - return this->emitSetLocal(*T, Off, VD); - } else { - // Composite types - allocate storage and initialize it. - if (auto Off = this->allocateLocal(VD)) { - return this->visitLocalInitializer(VD->getInit(), *Off); - } else { - return this->bail(VD); - } - } -} - -namespace clang { -namespace interp { - -template class ByteCodeStmtGen<ByteCodeEmitter>; - -} // namespace interp -} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h deleted file mode 100644 index d9c0b64ed4b8..000000000000 --- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h +++ /dev/null @@ -1,89 +0,0 @@ -//===--- ByteCodeStmtGen.h - Code generator for expressions -----*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// Defines the constexpr bytecode compiler. 
-// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CLANG_AST_INTERP_BYTECODESTMTGEN_H -#define LLVM_CLANG_AST_INTERP_BYTECODESTMTGEN_H - -#include "ByteCodeEmitter.h" -#include "ByteCodeExprGen.h" -#include "EvalEmitter.h" -#include "Pointer.h" -#include "PrimType.h" -#include "Record.h" -#include "clang/AST/Decl.h" -#include "clang/AST/Expr.h" -#include "clang/AST/StmtVisitor.h" -#include "llvm/ADT/Optional.h" - -namespace clang { -class QualType; - -namespace interp { -class Function; -class State; - -template <class Emitter> class LoopScope; -template <class Emitter> class SwitchScope; -template <class Emitter> class LabelScope; - -/// Compilation context for statements. -template <class Emitter> -class ByteCodeStmtGen : public ByteCodeExprGen<Emitter> { - using LabelTy = typename Emitter::LabelTy; - using AddrTy = typename Emitter::AddrTy; - using OptLabelTy = llvm::Optional<LabelTy>; - using CaseMap = llvm::DenseMap<const SwitchCase *, LabelTy>; - -public: - template<typename... Tys> - ByteCodeStmtGen(Tys&&... Args) - : ByteCodeExprGen<Emitter>(std::forward<Tys>(Args)...) {} - -protected: - bool visitFunc(const FunctionDecl *F) override; - -private: - friend class LabelScope<Emitter>; - friend class LoopScope<Emitter>; - friend class SwitchScope<Emitter>; - - // Statement visitors. - bool visitStmt(const Stmt *S); - bool visitCompoundStmt(const CompoundStmt *S); - bool visitDeclStmt(const DeclStmt *DS); - bool visitReturnStmt(const ReturnStmt *RS); - bool visitIfStmt(const IfStmt *IS); - - /// Compiles a variable declaration. - bool visitVarDecl(const VarDecl *VD); - -private: - /// Type of the expression returned by the function. - llvm::Optional<PrimType> ReturnType; - - /// Switch case mapping. - CaseMap CaseLabels; - - /// Point to break to. - OptLabelTy BreakLabel; - /// Point to continue to. - OptLabelTy ContinueLabel; - /// Default case label. - OptLabelTy DefaultLabel; -}; - -extern template class ByteCodeExprGen<EvalEmitter>; - -} // namespace interp -} // namespace clang - -#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Compiler.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.cpp new file mode 100644 index 000000000000..0fc93c14131e --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.cpp @@ -0,0 +1,5599 @@ +//===--- Compiler.cpp - Code generator for expressions ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Compiler.h" +#include "ByteCodeEmitter.h" +#include "Context.h" +#include "Floating.h" +#include "Function.h" +#include "InterpShared.h" +#include "PrimType.h" +#include "Program.h" +#include "clang/AST/Attr.h" + +using namespace clang; +using namespace clang::interp; + +using APSInt = llvm::APSInt; + +namespace clang { +namespace interp { + +/// Scope used to handle temporaries in toplevel variable declarations. 
+template <class Emitter> class DeclScope final : public LocalScope<Emitter> { +public: + DeclScope(Compiler<Emitter> *Ctx, const ValueDecl *VD) + : LocalScope<Emitter>(Ctx, VD), Scope(Ctx->P, VD), + OldGlobalDecl(Ctx->GlobalDecl), + OldInitializingDecl(Ctx->InitializingDecl) { + Ctx->GlobalDecl = Context::shouldBeGloballyIndexed(VD); + Ctx->InitializingDecl = VD; + Ctx->InitStack.push_back(InitLink::Decl(VD)); + } + + void addExtended(const Scope::Local &Local) override { + return this->addLocal(Local); + } + + ~DeclScope() { + this->Ctx->GlobalDecl = OldGlobalDecl; + this->Ctx->InitializingDecl = OldInitializingDecl; + this->Ctx->InitStack.pop_back(); + } + +private: + Program::DeclScope Scope; + bool OldGlobalDecl; + const ValueDecl *OldInitializingDecl; +}; + +/// Scope used to handle initialization methods. +template <class Emitter> class OptionScope final { +public: + /// Root constructor, compiling or discarding primitives. + OptionScope(Compiler<Emitter> *Ctx, bool NewDiscardResult, + bool NewInitializing) + : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult), + OldInitializing(Ctx->Initializing) { + Ctx->DiscardResult = NewDiscardResult; + Ctx->Initializing = NewInitializing; + } + + ~OptionScope() { + Ctx->DiscardResult = OldDiscardResult; + Ctx->Initializing = OldInitializing; + } + +private: + /// Parent context. + Compiler<Emitter> *Ctx; + /// Old discard flag to restore. + bool OldDiscardResult; + bool OldInitializing; +}; + +template <class Emitter> +bool InitLink::emit(Compiler<Emitter> *Ctx, const Expr *E) const { + switch (Kind) { + case K_This: + return Ctx->emitThis(E); + case K_Field: + // We're assuming there's a base pointer on the stack already. + return Ctx->emitGetPtrFieldPop(Offset, E); + case K_Temp: + return Ctx->emitGetPtrLocal(Offset, E); + case K_Decl: + return Ctx->visitDeclRef(D, E); + default: + llvm_unreachable("Unhandled InitLink kind"); + } + return true; +} + +/// Scope managing label targets. +template <class Emitter> class LabelScope { +public: + virtual ~LabelScope() {} + +protected: + LabelScope(Compiler<Emitter> *Ctx) : Ctx(Ctx) {} + /// Compiler instance. + Compiler<Emitter> *Ctx; +}; + +/// Sets the context for break/continue statements. +template <class Emitter> class LoopScope final : public LabelScope<Emitter> { +public: + using LabelTy = typename Compiler<Emitter>::LabelTy; + using OptLabelTy = typename Compiler<Emitter>::OptLabelTy; + + LoopScope(Compiler<Emitter> *Ctx, LabelTy BreakLabel, LabelTy ContinueLabel) + : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel), + OldContinueLabel(Ctx->ContinueLabel) { + this->Ctx->BreakLabel = BreakLabel; + this->Ctx->ContinueLabel = ContinueLabel; + } + + ~LoopScope() { + this->Ctx->BreakLabel = OldBreakLabel; + this->Ctx->ContinueLabel = OldContinueLabel; + } + +private: + OptLabelTy OldBreakLabel; + OptLabelTy OldContinueLabel; +}; + +// Sets the context for a switch scope, mapping labels. 
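// Illustrative sketch (hypothetical minimal version): LoopScope/SwitchScope
// above follow the usual save-and-restore RAII pattern (cf.
// llvm::SaveAndRestore): install the labels of the construct being entered
// and restore the enclosing ones on scope exit, so break/continue always
// bind to the innermost enclosing loop or switch.
#include <cassert>

template <typename T> class ScopedOverride {
  T &Slot;
  T Saved;

public:
  ScopedOverride(T &S, T NewValue) : Slot(S), Saved(S) { Slot = NewValue; }
  ~ScopedOverride() { Slot = Saved; }
};

int main() {
  int BreakLabel = 1;                         // label of the outer loop
  {
    ScopedOverride<int> Inner(BreakLabel, 2); // entering a nested loop
    assert(BreakLabel == 2);                  // 'break' targets the inner loop
  }
  assert(BreakLabel == 1);                    // restored for the outer loop
}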
+template <class Emitter> class SwitchScope final : public LabelScope<Emitter> { +public: + using LabelTy = typename Compiler<Emitter>::LabelTy; + using OptLabelTy = typename Compiler<Emitter>::OptLabelTy; + using CaseMap = typename Compiler<Emitter>::CaseMap; + + SwitchScope(Compiler<Emitter> *Ctx, CaseMap &&CaseLabels, LabelTy BreakLabel, + OptLabelTy DefaultLabel) + : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel), + OldDefaultLabel(this->Ctx->DefaultLabel), + OldCaseLabels(std::move(this->Ctx->CaseLabels)) { + this->Ctx->BreakLabel = BreakLabel; + this->Ctx->DefaultLabel = DefaultLabel; + this->Ctx->CaseLabels = std::move(CaseLabels); + } + + ~SwitchScope() { + this->Ctx->BreakLabel = OldBreakLabel; + this->Ctx->DefaultLabel = OldDefaultLabel; + this->Ctx->CaseLabels = std::move(OldCaseLabels); + } + +private: + OptLabelTy OldBreakLabel; + OptLabelTy OldDefaultLabel; + CaseMap OldCaseLabels; +}; + +template <class Emitter> class StmtExprScope final { +public: + StmtExprScope(Compiler<Emitter> *Ctx) : Ctx(Ctx), OldFlag(Ctx->InStmtExpr) { + Ctx->InStmtExpr = true; + } + + ~StmtExprScope() { Ctx->InStmtExpr = OldFlag; } + +private: + Compiler<Emitter> *Ctx; + bool OldFlag; +}; + +} // namespace interp +} // namespace clang + +template <class Emitter> +bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { + const Expr *SubExpr = CE->getSubExpr(); + switch (CE->getCastKind()) { + + case CK_LValueToRValue: { + if (DiscardResult) + return this->discard(SubExpr); + + std::optional<PrimType> SubExprT = classify(SubExpr->getType()); + // Prepare storage for the result. + if (!Initializing && !SubExprT) { + std::optional<unsigned> LocalIndex = allocateLocal(SubExpr); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, CE)) + return false; + } + + if (!this->visit(SubExpr)) + return false; + + if (SubExprT) + return this->emitLoadPop(*SubExprT, CE); + + // If the subexpr type is not primitive, we need to perform a copy here. + // This happens for example in C when dereferencing a pointer of struct + // type. + return this->emitMemcpy(CE); + } + + case CK_DerivedToBaseMemberPointer: { + assert(classifyPrim(CE->getType()) == PT_MemberPtr); + assert(classifyPrim(SubExpr->getType()) == PT_MemberPtr); + const auto *FromMP = SubExpr->getType()->getAs<MemberPointerType>(); + const auto *ToMP = CE->getType()->getAs<MemberPointerType>(); + + unsigned DerivedOffset = collectBaseOffset(QualType(ToMP->getClass(), 0), + QualType(FromMP->getClass(), 0)); + + if (!this->visit(SubExpr)) + return false; + + return this->emitGetMemberPtrBasePop(DerivedOffset, CE); + } + + case CK_BaseToDerivedMemberPointer: { + assert(classifyPrim(CE) == PT_MemberPtr); + assert(classifyPrim(SubExpr) == PT_MemberPtr); + const auto *FromMP = SubExpr->getType()->getAs<MemberPointerType>(); + const auto *ToMP = CE->getType()->getAs<MemberPointerType>(); + + unsigned DerivedOffset = collectBaseOffset(QualType(FromMP->getClass(), 0), + QualType(ToMP->getClass(), 0)); + + if (!this->visit(SubExpr)) + return false; + return this->emitGetMemberPtrBasePop(-DerivedOffset, CE); + } + + case CK_UncheckedDerivedToBase: + case CK_DerivedToBase: { + if (!this->visit(SubExpr)) + return false; + + const auto extractRecordDecl = [](QualType Ty) -> const CXXRecordDecl * { + if (const auto *PT = dyn_cast<PointerType>(Ty)) + return PT->getPointeeType()->getAsCXXRecordDecl(); + return Ty->getAsCXXRecordDecl(); + }; + + // FIXME: We can express a series of non-virtual casts as a single + // GetPtrBasePop op. 
+ QualType CurType = SubExpr->getType(); + for (const CXXBaseSpecifier *B : CE->path()) { + if (B->isVirtual()) { + if (!this->emitGetPtrVirtBasePop(extractRecordDecl(B->getType()), CE)) + return false; + CurType = B->getType(); + } else { + unsigned DerivedOffset = collectBaseOffset(B->getType(), CurType); + if (!this->emitGetPtrBasePop(DerivedOffset, CE)) + return false; + CurType = B->getType(); + } + } + + return true; + } + + case CK_BaseToDerived: { + if (!this->visit(SubExpr)) + return false; + + unsigned DerivedOffset = + collectBaseOffset(SubExpr->getType(), CE->getType()); + + return this->emitGetPtrDerivedPop(DerivedOffset, CE); + } + + case CK_FloatingCast: { + // HLSL uses CK_FloatingCast to cast between vectors. + if (!SubExpr->getType()->isFloatingType() || + !CE->getType()->isFloatingType()) + return false; + if (DiscardResult) + return this->discard(SubExpr); + if (!this->visit(SubExpr)) + return false; + const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType()); + return this->emitCastFP(TargetSemantics, getRoundingMode(CE), CE); + } + + case CK_IntegralToFloating: { + if (DiscardResult) + return this->discard(SubExpr); + std::optional<PrimType> FromT = classify(SubExpr->getType()); + if (!FromT) + return false; + + if (!this->visit(SubExpr)) + return false; + + const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType()); + llvm::RoundingMode RM = getRoundingMode(CE); + return this->emitCastIntegralFloating(*FromT, TargetSemantics, RM, CE); + } + + case CK_FloatingToBoolean: + case CK_FloatingToIntegral: { + if (DiscardResult) + return this->discard(SubExpr); + + std::optional<PrimType> ToT = classify(CE->getType()); + + if (!ToT) + return false; + + if (!this->visit(SubExpr)) + return false; + + if (ToT == PT_IntAP) + return this->emitCastFloatingIntegralAP(Ctx.getBitWidth(CE->getType()), + CE); + if (ToT == PT_IntAPS) + return this->emitCastFloatingIntegralAPS(Ctx.getBitWidth(CE->getType()), + CE); + + return this->emitCastFloatingIntegral(*ToT, CE); + } + + case CK_NullToPointer: + case CK_NullToMemberPointer: { + if (DiscardResult) + return true; + + const Descriptor *Desc = nullptr; + const QualType PointeeType = CE->getType()->getPointeeType(); + if (!PointeeType.isNull()) { + if (std::optional<PrimType> T = classify(PointeeType)) + Desc = P.createDescriptor(SubExpr, *T); + } + return this->emitNull(classifyPrim(CE->getType()), Desc, CE); + } + + case CK_PointerToIntegral: { + if (DiscardResult) + return this->discard(SubExpr); + + if (!this->visit(SubExpr)) + return false; + + // If SubExpr doesn't result in a pointer, make it one. 
+ if (PrimType FromT = classifyPrim(SubExpr->getType()); FromT != PT_Ptr) { + assert(isPtrType(FromT)); + if (!this->emitDecayPtr(FromT, PT_Ptr, CE)) + return false; + } + + PrimType T = classifyPrim(CE->getType()); + if (T == PT_IntAP) + return this->emitCastPointerIntegralAP(Ctx.getBitWidth(CE->getType()), + CE); + if (T == PT_IntAPS) + return this->emitCastPointerIntegralAPS(Ctx.getBitWidth(CE->getType()), + CE); + return this->emitCastPointerIntegral(T, CE); + } + + case CK_ArrayToPointerDecay: { + if (!this->visit(SubExpr)) + return false; + if (!this->emitArrayDecay(CE)) + return false; + if (DiscardResult) + return this->emitPopPtr(CE); + return true; + } + + case CK_IntegralToPointer: { + QualType IntType = SubExpr->getType(); + assert(IntType->isIntegralOrEnumerationType()); + if (!this->visit(SubExpr)) + return false; + // FIXME: I think the discard is wrong since the int->ptr cast might cause a + // diagnostic. + PrimType T = classifyPrim(IntType); + if (DiscardResult) + return this->emitPop(T, CE); + + QualType PtrType = CE->getType(); + assert(PtrType->isPointerType()); + + const Descriptor *Desc; + if (std::optional<PrimType> T = classify(PtrType->getPointeeType())) + Desc = P.createDescriptor(SubExpr, *T); + else if (PtrType->getPointeeType()->isVoidType()) + Desc = nullptr; + else + Desc = P.createDescriptor(CE, PtrType->getPointeeType().getTypePtr(), + Descriptor::InlineDescMD, true, false, + /*IsMutable=*/false, nullptr); + + if (!this->emitGetIntPtr(T, Desc, CE)) + return false; + + PrimType DestPtrT = classifyPrim(PtrType); + if (DestPtrT == PT_Ptr) + return true; + + // In case we're converting the integer to a non-Pointer. + return this->emitDecayPtr(PT_Ptr, DestPtrT, CE); + } + + case CK_AtomicToNonAtomic: + case CK_ConstructorConversion: + case CK_FunctionToPointerDecay: + case CK_NonAtomicToAtomic: + case CK_NoOp: + case CK_UserDefinedConversion: + case CK_AddressSpaceConversion: + return this->delegate(SubExpr); + + case CK_BitCast: { + // Reject bitcasts to atomic types. + if (CE->getType()->isAtomicType()) { + if (!this->discard(SubExpr)) + return false; + return this->emitInvalidCast(CastKind::Reinterpret, CE); + } + + if (DiscardResult) + return this->discard(SubExpr); + + QualType SubExprTy = SubExpr->getType(); + std::optional<PrimType> FromT = classify(SubExprTy); + std::optional<PrimType> ToT = classify(CE->getType()); + if (!FromT || !ToT) + return false; + + assert(isPtrType(*FromT)); + assert(isPtrType(*ToT)); + if (FromT == ToT) { + if (CE->getType()->isVoidPointerType()) + return this->delegate(SubExpr); + + if (!this->visit(SubExpr)) + return false; + if (FromT == PT_Ptr) + return this->emitPtrPtrCast(SubExprTy->isVoidPointerType(), CE); + return true; + } + + if (!this->visit(SubExpr)) + return false; + return this->emitDecayPtr(*FromT, *ToT, CE); + } + + case CK_IntegralToBoolean: + case CK_BooleanToSignedIntegral: + case CK_IntegralCast: { + if (DiscardResult) + return this->discard(SubExpr); + std::optional<PrimType> FromT = classify(SubExpr->getType()); + std::optional<PrimType> ToT = classify(CE->getType()); + + if (!FromT || !ToT) + return false; + + if (!this->visit(SubExpr)) + return false; + + // Possibly diagnose casts to enum types if the target type does not + // have a fixed size. 
+ if (Ctx.getLangOpts().CPlusPlus && CE->getType()->isEnumeralType()) { + if (const auto *ET = CE->getType().getCanonicalType()->getAs<EnumType>(); + ET && !ET->getDecl()->isFixed()) { + if (!this->emitCheckEnumValue(*FromT, ET->getDecl(), CE)) + return false; + } + } + + if (ToT == PT_IntAP) + return this->emitCastAP(*FromT, Ctx.getBitWidth(CE->getType()), CE); + if (ToT == PT_IntAPS) + return this->emitCastAPS(*FromT, Ctx.getBitWidth(CE->getType()), CE); + + if (FromT == ToT) + return true; + if (!this->emitCast(*FromT, *ToT, CE)) + return false; + + if (CE->getCastKind() == CK_BooleanToSignedIntegral) + return this->emitNeg(*ToT, CE); + return true; + } + + case CK_PointerToBoolean: + case CK_MemberPointerToBoolean: { + PrimType PtrT = classifyPrim(SubExpr->getType()); + + // Just emit p != nullptr for this. + if (!this->visit(SubExpr)) + return false; + + if (!this->emitNull(PtrT, nullptr, CE)) + return false; + + return this->emitNE(PtrT, CE); + } + + case CK_IntegralComplexToBoolean: + case CK_FloatingComplexToBoolean: { + if (DiscardResult) + return this->discard(SubExpr); + if (!this->visit(SubExpr)) + return false; + return this->emitComplexBoolCast(SubExpr); + } + + case CK_IntegralComplexToReal: + case CK_FloatingComplexToReal: + return this->emitComplexReal(SubExpr); + + case CK_IntegralRealToComplex: + case CK_FloatingRealToComplex: { + // We're creating a complex value here, so we need to + // allocate storage for it. + if (!Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(CE); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, CE)) + return false; + } + + // Init the complex value to {SubExpr, 0}. + if (!this->visitArrayElemInit(0, SubExpr)) + return false; + // Zero-init the second element. + PrimType T = classifyPrim(SubExpr->getType()); + if (!this->visitZeroInitializer(T, SubExpr->getType(), SubExpr)) + return false; + return this->emitInitElem(T, 1, SubExpr); + } + + case CK_IntegralComplexCast: + case CK_FloatingComplexCast: + case CK_IntegralComplexToFloatingComplex: + case CK_FloatingComplexToIntegralComplex: { + assert(CE->getType()->isAnyComplexType()); + assert(SubExpr->getType()->isAnyComplexType()); + if (DiscardResult) + return this->discard(SubExpr); + + if (!Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(CE); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, CE)) + return false; + } + + // Location for the SubExpr. + // Since SubExpr is of complex type, visiting it results in a pointer + // anyway, so we just create a temporary pointer variable. + unsigned SubExprOffset = allocateLocalPrimitive( + SubExpr, PT_Ptr, /*IsConst=*/true, /*IsExtended=*/false); + if (!this->visit(SubExpr)) + return false; + if (!this->emitSetLocal(PT_Ptr, SubExprOffset, CE)) + return false; + + PrimType SourceElemT = classifyComplexElementType(SubExpr->getType()); + QualType DestElemType = + CE->getType()->getAs<ComplexType>()->getElementType(); + PrimType DestElemT = classifyPrim(DestElemType); + // Cast both elements individually. + for (unsigned I = 0; I != 2; ++I) { + if (!this->emitGetLocal(PT_Ptr, SubExprOffset, CE)) + return false; + if (!this->emitArrayElemPop(SourceElemT, I, CE)) + return false; + + // Do the cast. + if (!this->emitPrimCast(SourceElemT, DestElemT, DestElemType, CE)) + return false; + + // Save the value. 
+ if (!this->emitInitElem(DestElemT, I, CE)) + return false; + } + return true; + } + + case CK_VectorSplat: { + assert(!classify(CE->getType())); + assert(classify(SubExpr->getType())); + assert(CE->getType()->isVectorType()); + + if (DiscardResult) + return this->discard(SubExpr); + + if (!Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(CE); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, CE)) + return false; + } + + const auto *VT = CE->getType()->getAs<VectorType>(); + PrimType ElemT = classifyPrim(SubExpr->getType()); + unsigned ElemOffset = allocateLocalPrimitive( + SubExpr, ElemT, /*IsConst=*/true, /*IsExtended=*/false); + + // Prepare a local variable for the scalar value. + if (!this->visit(SubExpr)) + return false; + if (classifyPrim(SubExpr) == PT_Ptr && !this->emitLoadPop(ElemT, CE)) + return false; + + if (!this->emitSetLocal(ElemT, ElemOffset, CE)) + return false; + + for (unsigned I = 0; I != VT->getNumElements(); ++I) { + if (!this->emitGetLocal(ElemT, ElemOffset, CE)) + return false; + if (!this->emitInitElem(ElemT, I, CE)) + return false; + } + + return true; + } + + case CK_ToVoid: + return discard(SubExpr); + + default: + return this->emitInvalid(CE); + } + llvm_unreachable("Unhandled clang::CastKind enum"); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitIntegerLiteral(const IntegerLiteral *LE) { + if (DiscardResult) + return true; + + return this->emitConst(LE->getValue(), LE); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitFloatingLiteral(const FloatingLiteral *E) { + if (DiscardResult) + return true; + + return this->emitConstFloat(E->getValue(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitImaginaryLiteral(const ImaginaryLiteral *E) { + assert(E->getType()->isAnyComplexType()); + if (DiscardResult) + return true; + + if (!Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(E); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + + const Expr *SubExpr = E->getSubExpr(); + PrimType SubExprT = classifyPrim(SubExpr->getType()); + + if (!this->visitZeroInitializer(SubExprT, SubExpr->getType(), SubExpr)) + return false; + if (!this->emitInitElem(SubExprT, 0, SubExpr)) + return false; + return this->visitArrayElemInit(1, SubExpr); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitParenExpr(const ParenExpr *E) { + return this->delegate(E->getSubExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) { + // Need short-circuiting for these. + if (BO->isLogicalOp()) + return this->VisitLogicalBinOp(BO); + + const Expr *LHS = BO->getLHS(); + const Expr *RHS = BO->getRHS(); + + // Handle comma operators. Just discard the LHS + // and delegate to RHS. + if (BO->isCommaOp()) { + if (!this->discard(LHS)) + return false; + if (RHS->getType()->isVoidType()) + return this->discard(RHS); + + return this->delegate(RHS); + } + + if (BO->getType()->isAnyComplexType()) + return this->VisitComplexBinOp(BO); + if ((LHS->getType()->isAnyComplexType() || + RHS->getType()->isAnyComplexType()) && + BO->isComparisonOp()) + return this->emitComplexComparison(LHS, RHS, BO); + + if (BO->isPtrMemOp()) { + if (!this->visit(LHS)) + return false; + + if (!this->visit(RHS)) + return false; + + if (!this->emitToMemberPtr(BO)) + return false; + + if (classifyPrim(BO) == PT_MemberPtr) + return true; + + if (!this->emitCastMemberPtrPtr(BO)) + return false; + return DiscardResult ? 
this->emitPopPtr(BO) : true; + } + + // Typecheck the args. + std::optional<PrimType> LT = classify(LHS->getType()); + std::optional<PrimType> RT = classify(RHS->getType()); + std::optional<PrimType> T = classify(BO->getType()); + + // Special case for C++'s three-way/spaceship operator <=>, which + // returns a std::{strong,weak,partial}_ordering (which is a class, so doesn't + // have a PrimType). + if (!T && BO->getOpcode() == BO_Cmp) { + if (DiscardResult) + return true; + const ComparisonCategoryInfo *CmpInfo = + Ctx.getASTContext().CompCategories.lookupInfoForType(BO->getType()); + assert(CmpInfo); + + // We need a temporary variable holding our return value. + if (!Initializing) { + std::optional<unsigned> ResultIndex = this->allocateLocal(BO); + if (!this->emitGetPtrLocal(*ResultIndex, BO)) + return false; + } + + if (!visit(LHS) || !visit(RHS)) + return false; + + return this->emitCMP3(*LT, CmpInfo, BO); + } + + if (!LT || !RT || !T) + return false; + + // Pointer arithmetic special case. + if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) { + if (isPtrType(*T) || (isPtrType(*LT) && isPtrType(*RT))) + return this->VisitPointerArithBinOp(BO); + } + + if (!visit(LHS) || !visit(RHS)) + return false; + + // For languages such as C, cast the result of one + // of our comparison opcodes to T (which is usually int). + auto MaybeCastToBool = [this, T, BO](bool Result) { + if (!Result) + return false; + if (DiscardResult) + return this->emitPop(*T, BO); + if (T != PT_Bool) + return this->emitCast(PT_Bool, *T, BO); + return true; + }; + + auto Discard = [this, T, BO](bool Result) { + if (!Result) + return false; + return DiscardResult ? this->emitPop(*T, BO) : true; + }; + + switch (BO->getOpcode()) { + case BO_EQ: + return MaybeCastToBool(this->emitEQ(*LT, BO)); + case BO_NE: + return MaybeCastToBool(this->emitNE(*LT, BO)); + case BO_LT: + return MaybeCastToBool(this->emitLT(*LT, BO)); + case BO_LE: + return MaybeCastToBool(this->emitLE(*LT, BO)); + case BO_GT: + return MaybeCastToBool(this->emitGT(*LT, BO)); + case BO_GE: + return MaybeCastToBool(this->emitGE(*LT, BO)); + case BO_Sub: + if (BO->getType()->isFloatingType()) + return Discard(this->emitSubf(getRoundingMode(BO), BO)); + return Discard(this->emitSub(*T, BO)); + case BO_Add: + if (BO->getType()->isFloatingType()) + return Discard(this->emitAddf(getRoundingMode(BO), BO)); + return Discard(this->emitAdd(*T, BO)); + case BO_Mul: + if (BO->getType()->isFloatingType()) + return Discard(this->emitMulf(getRoundingMode(BO), BO)); + return Discard(this->emitMul(*T, BO)); + case BO_Rem: + return Discard(this->emitRem(*T, BO)); + case BO_Div: + if (BO->getType()->isFloatingType()) + return Discard(this->emitDivf(getRoundingMode(BO), BO)); + return Discard(this->emitDiv(*T, BO)); + case BO_Assign: + if (DiscardResult) + return LHS->refersToBitField() ? this->emitStoreBitFieldPop(*T, BO) + : this->emitStorePop(*T, BO); + if (LHS->refersToBitField()) { + if (!this->emitStoreBitField(*T, BO)) + return false; + } else { + if (!this->emitStore(*T, BO)) + return false; + } + // Assignments aren't necessarily lvalues in C. + // Load from them in that case. 
+ if (!BO->isLValue()) + return this->emitLoadPop(*T, BO); + return true; + case BO_And: + return Discard(this->emitBitAnd(*T, BO)); + case BO_Or: + return Discard(this->emitBitOr(*T, BO)); + case BO_Shl: + return Discard(this->emitShl(*LT, *RT, BO)); + case BO_Shr: + return Discard(this->emitShr(*LT, *RT, BO)); + case BO_Xor: + return Discard(this->emitBitXor(*T, BO)); + case BO_LOr: + case BO_LAnd: + llvm_unreachable("Already handled earlier"); + default: + return false; + } + + llvm_unreachable("Unhandled binary op"); +} + +/// Perform addition/subtraction of a pointer and an integer or +/// subtraction of two pointers. +template <class Emitter> +bool Compiler<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) { + BinaryOperatorKind Op = E->getOpcode(); + const Expr *LHS = E->getLHS(); + const Expr *RHS = E->getRHS(); + + if ((Op != BO_Add && Op != BO_Sub) || + (!LHS->getType()->isPointerType() && !RHS->getType()->isPointerType())) + return false; + + std::optional<PrimType> LT = classify(LHS); + std::optional<PrimType> RT = classify(RHS); + + if (!LT || !RT) + return false; + + if (LHS->getType()->isPointerType() && RHS->getType()->isPointerType()) { + if (Op != BO_Sub) + return false; + + assert(E->getType()->isIntegerType()); + if (!visit(RHS) || !visit(LHS)) + return false; + + return this->emitSubPtr(classifyPrim(E->getType()), E); + } + + PrimType OffsetType; + if (LHS->getType()->isIntegerType()) { + if (!visit(RHS) || !visit(LHS)) + return false; + OffsetType = *LT; + } else if (RHS->getType()->isIntegerType()) { + if (!visit(LHS) || !visit(RHS)) + return false; + OffsetType = *RT; + } else { + return false; + } + + if (Op == BO_Add) + return this->emitAddOffset(OffsetType, E); + else if (Op == BO_Sub) + return this->emitSubOffset(OffsetType, E); + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitLogicalBinOp(const BinaryOperator *E) { + assert(E->isLogicalOp()); + BinaryOperatorKind Op = E->getOpcode(); + const Expr *LHS = E->getLHS(); + const Expr *RHS = E->getRHS(); + std::optional<PrimType> T = classify(E->getType()); + + if (Op == BO_LOr) { + // Logical OR. Visit LHS and only evaluate RHS if LHS was FALSE. + LabelTy LabelTrue = this->getLabel(); + LabelTy LabelEnd = this->getLabel(); + + if (!this->visitBool(LHS)) + return false; + if (!this->jumpTrue(LabelTrue)) + return false; + + if (!this->visitBool(RHS)) + return false; + if (!this->jump(LabelEnd)) + return false; + + this->emitLabel(LabelTrue); + this->emitConstBool(true, E); + this->fallthrough(LabelEnd); + this->emitLabel(LabelEnd); + + } else { + assert(Op == BO_LAnd); + // Logical AND. + // Visit LHS. Only visit RHS if LHS was TRUE. + LabelTy LabelFalse = this->getLabel(); + LabelTy LabelEnd = this->getLabel(); + + if (!this->visitBool(LHS)) + return false; + if (!this->jumpFalse(LabelFalse)) + return false; + + if (!this->visitBool(RHS)) + return false; + if (!this->jump(LabelEnd)) + return false; + + this->emitLabel(LabelFalse); + this->emitConstBool(false, E); + this->fallthrough(LabelEnd); + this->emitLabel(LabelEnd); + } + + if (DiscardResult) + return this->emitPopBool(E); + + // For C, cast back to integer type. + assert(T); + if (T != PT_Bool) + return this->emitCast(PT_Bool, *T, E); + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitComplexBinOp(const BinaryOperator *E) { + // Prepare storage for result. 
+ if (!Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(E); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + + // Both LHS and RHS might _not_ be of complex type, but one of them + // needs to be. + const Expr *LHS = E->getLHS(); + const Expr *RHS = E->getRHS(); + + PrimType ResultElemT = this->classifyComplexElementType(E->getType()); + unsigned ResultOffset = ~0u; + if (!DiscardResult) + ResultOffset = this->allocateLocalPrimitive(E, PT_Ptr, true, false); + + // Save the result pointer in ResultOffset. + if (!this->DiscardResult) { + if (!this->emitDupPtr(E)) + return false; + if (!this->emitSetLocal(PT_Ptr, ResultOffset, E)) + return false; + } + QualType LHSType = LHS->getType(); + if (const auto *AT = LHSType->getAs<AtomicType>()) + LHSType = AT->getValueType(); + QualType RHSType = RHS->getType(); + if (const auto *AT = RHSType->getAs<AtomicType>()) + RHSType = AT->getValueType(); + + bool LHSIsComplex = LHSType->isAnyComplexType(); + unsigned LHSOffset; + bool RHSIsComplex = RHSType->isAnyComplexType(); + + // For ComplexComplex Mul, we have special ops to make their implementation + // easier. + BinaryOperatorKind Op = E->getOpcode(); + if (Op == BO_Mul && LHSIsComplex && RHSIsComplex) { + assert(classifyPrim(LHSType->getAs<ComplexType>()->getElementType()) == + classifyPrim(RHSType->getAs<ComplexType>()->getElementType())); + PrimType ElemT = + classifyPrim(LHSType->getAs<ComplexType>()->getElementType()); + if (!this->visit(LHS)) + return false; + if (!this->visit(RHS)) + return false; + return this->emitMulc(ElemT, E); + } + + if (Op == BO_Div && RHSIsComplex) { + QualType ElemQT = RHSType->getAs<ComplexType>()->getElementType(); + PrimType ElemT = classifyPrim(ElemQT); + // If the LHS is not complex, we still need to do the full complex + // division, so just create a fake complex value and stub it out with + // the LHS and a zero. + + if (!LHSIsComplex) { + // This is using the RHS type for the fake-complex LHS. + if (auto LHSO = allocateLocal(RHS)) + LHSOffset = *LHSO; + else + return false; + + if (!this->emitGetPtrLocal(LHSOffset, E)) + return false; + + if (!this->visit(LHS)) + return false; + // real is LHS + if (!this->emitInitElem(ElemT, 0, E)) + return false; + // imag is zero + if (!this->visitZeroInitializer(ElemT, ElemQT, E)) + return false; + if (!this->emitInitElem(ElemT, 1, E)) + return false; + } else { + if (!this->visit(LHS)) + return false; + } + + if (!this->visit(RHS)) + return false; + return this->emitDivc(ElemT, E); + } + + // Evaluate LHS and save value to LHSOffset. + if (LHSType->isAnyComplexType()) { + LHSOffset = this->allocateLocalPrimitive(LHS, PT_Ptr, true, false); + if (!this->visit(LHS)) + return false; + if (!this->emitSetLocal(PT_Ptr, LHSOffset, E)) + return false; + } else { + PrimType LHST = classifyPrim(LHSType); + LHSOffset = this->allocateLocalPrimitive(LHS, LHST, true, false); + if (!this->visit(LHS)) + return false; + if (!this->emitSetLocal(LHST, LHSOffset, E)) + return false; + } + + // Same with RHS. 
+ unsigned RHSOffset; + if (RHSType->isAnyComplexType()) { + RHSOffset = this->allocateLocalPrimitive(RHS, PT_Ptr, true, false); + if (!this->visit(RHS)) + return false; + if (!this->emitSetLocal(PT_Ptr, RHSOffset, E)) + return false; + } else { + PrimType RHST = classifyPrim(RHSType); + RHSOffset = this->allocateLocalPrimitive(RHS, RHST, true, false); + if (!this->visit(RHS)) + return false; + if (!this->emitSetLocal(RHST, RHSOffset, E)) + return false; + } + + // For both LHS and RHS, either load the value from the complex pointer, or + // directly from the local variable. For index 1 (i.e. the imaginary part), + // just load 0 and do the operation anyway. + auto loadComplexValue = [this](bool IsComplex, bool LoadZero, + unsigned ElemIndex, unsigned Offset, + const Expr *E) -> bool { + if (IsComplex) { + if (!this->emitGetLocal(PT_Ptr, Offset, E)) + return false; + return this->emitArrayElemPop(classifyComplexElementType(E->getType()), + ElemIndex, E); + } + if (ElemIndex == 0 || !LoadZero) + return this->emitGetLocal(classifyPrim(E->getType()), Offset, E); + return this->visitZeroInitializer(classifyPrim(E->getType()), E->getType(), + E); + }; + + // Now we can get pointers to the LHS and RHS from the offsets above. + for (unsigned ElemIndex = 0; ElemIndex != 2; ++ElemIndex) { + // Result pointer for the store later. + if (!this->DiscardResult) { + if (!this->emitGetLocal(PT_Ptr, ResultOffset, E)) + return false; + } + + // The actual operation. + switch (Op) { + case BO_Add: + if (!loadComplexValue(LHSIsComplex, true, ElemIndex, LHSOffset, LHS)) + return false; + + if (!loadComplexValue(RHSIsComplex, true, ElemIndex, RHSOffset, RHS)) + return false; + if (ResultElemT == PT_Float) { + if (!this->emitAddf(getRoundingMode(E), E)) + return false; + } else { + if (!this->emitAdd(ResultElemT, E)) + return false; + } + break; + case BO_Sub: + if (!loadComplexValue(LHSIsComplex, true, ElemIndex, LHSOffset, LHS)) + return false; + + if (!loadComplexValue(RHSIsComplex, true, ElemIndex, RHSOffset, RHS)) + return false; + if (ResultElemT == PT_Float) { + if (!this->emitSubf(getRoundingMode(E), E)) + return false; + } else { + if (!this->emitSub(ResultElemT, E)) + return false; + } + break; + case BO_Mul: + if (!loadComplexValue(LHSIsComplex, false, ElemIndex, LHSOffset, LHS)) + return false; + + if (!loadComplexValue(RHSIsComplex, false, ElemIndex, RHSOffset, RHS)) + return false; + + if (ResultElemT == PT_Float) { + if (!this->emitMulf(getRoundingMode(E), E)) + return false; + } else { + if (!this->emitMul(ResultElemT, E)) + return false; + } + break; + case BO_Div: + assert(!RHSIsComplex); + if (!loadComplexValue(LHSIsComplex, false, ElemIndex, LHSOffset, LHS)) + return false; + + if (!loadComplexValue(RHSIsComplex, false, ElemIndex, RHSOffset, RHS)) + return false; + + if (ResultElemT == PT_Float) { + if (!this->emitDivf(getRoundingMode(E), E)) + return false; + } else { + if (!this->emitDiv(ResultElemT, E)) + return false; + } + break; + + default: + return false; + } + + if (!this->DiscardResult) { + // Initialize array element with the value we just computed. 
+ if (!this->emitInitElemPop(ResultElemT, ElemIndex, E)) + return false; + } else { + if (!this->emitPop(ResultElemT, E)) + return false; + } + } + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitImplicitValueInitExpr( + const ImplicitValueInitExpr *E) { + QualType QT = E->getType(); + + if (std::optional<PrimType> T = classify(QT)) + return this->visitZeroInitializer(*T, QT, E); + + if (QT->isRecordType()) { + const RecordDecl *RD = QT->getAsRecordDecl(); + assert(RD); + if (RD->isInvalidDecl()) + return false; + if (RD->isUnion()) { + // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the + // object's first non-static named data member is zero-initialized + // FIXME + return false; + } + + if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD); + CXXRD && CXXRD->getNumVBases() > 0) { + // TODO: Diagnose. + return false; + } + + const Record *R = getRecord(QT); + if (!R) + return false; + + assert(Initializing); + return this->visitZeroRecordInitializer(R, E); + } + + if (QT->isIncompleteArrayType()) + return true; + + if (QT->isArrayType()) { + const ArrayType *AT = QT->getAsArrayTypeUnsafe(); + assert(AT); + const auto *CAT = cast<ConstantArrayType>(AT); + size_t NumElems = CAT->getZExtSize(); + PrimType ElemT = classifyPrim(CAT->getElementType()); + + for (size_t I = 0; I != NumElems; ++I) { + if (!this->visitZeroInitializer(ElemT, CAT->getElementType(), E)) + return false; + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + + return true; + } + + if (const auto *ComplexTy = E->getType()->getAs<ComplexType>()) { + assert(Initializing); + QualType ElemQT = ComplexTy->getElementType(); + PrimType ElemT = classifyPrim(ElemQT); + for (unsigned I = 0; I < 2; ++I) { + if (!this->visitZeroInitializer(ElemT, ElemQT, E)) + return false; + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + return true; + } + + if (const auto *VecT = E->getType()->getAs<VectorType>()) { + unsigned NumVecElements = VecT->getNumElements(); + QualType ElemQT = VecT->getElementType(); + PrimType ElemT = classifyPrim(ElemQT); + + for (unsigned I = 0; I < NumVecElements; ++I) { + if (!this->visitZeroInitializer(ElemT, ElemQT, E)) + return false; + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + return true; + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) { + const Expr *Base = E->getBase(); + const Expr *Index = E->getIdx(); + + if (DiscardResult) + return this->discard(Base) && this->discard(Index); + + // Take pointer of LHS, add offset from RHS. + // What's left on the stack after this is a pointer. + if (!this->visit(Base)) + return false; + + if (!this->visit(Index)) + return false; + + PrimType IndexT = classifyPrim(Index->getType()); + return this->emitArrayElemPtrPop(IndexT, E); +} + +template <class Emitter> +bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits, + const Expr *ArrayFiller, const Expr *E) { + + QualType QT = E->getType(); + + if (const auto *AT = QT->getAs<AtomicType>()) + QT = AT->getValueType(); + + if (QT->isVoidType()) + return this->emitInvalid(E); + + // Handle discarding first. + if (DiscardResult) { + for (const Expr *Init : Inits) { + if (!this->discard(Init)) + return false; + } + return true; + } + + // Primitive values. 
+ if (std::optional<PrimType> T = classify(QT)) { + assert(!DiscardResult); + if (Inits.size() == 0) + return this->visitZeroInitializer(*T, QT, E); + assert(Inits.size() == 1); + return this->delegate(Inits[0]); + } + + if (QT->isRecordType()) { + const Record *R = getRecord(QT); + + if (Inits.size() == 1 && E->getType() == Inits[0]->getType()) + return this->delegate(Inits[0]); + + auto initPrimitiveField = [=](const Record::Field *FieldToInit, + const Expr *Init, PrimType T) -> bool { + InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init)); + if (!this->visit(Init)) + return false; + + if (FieldToInit->isBitField()) + return this->emitInitBitField(T, FieldToInit, E); + return this->emitInitField(T, FieldToInit->Offset, E); + }; + + auto initCompositeField = [=](const Record::Field *FieldToInit, + const Expr *Init) -> bool { + InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init)); + InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset)); + // Non-primitive case. Get a pointer to the field-to-initialize + // on the stack and recurse into visitInitializer(). + if (!this->emitGetPtrField(FieldToInit->Offset, Init)) + return false; + if (!this->visitInitializer(Init)) + return false; + return this->emitPopPtr(E); + }; + + if (R->isUnion()) { + if (Inits.size() == 0) { + // Zero-initialize the first union field. + if (R->getNumFields() == 0) + return this->emitFinishInit(E); + const Record::Field *FieldToInit = R->getField(0u); + QualType FieldType = FieldToInit->Desc->getType(); + if (std::optional<PrimType> T = classify(FieldType)) { + if (!this->visitZeroInitializer(*T, FieldType, E)) + return false; + if (!this->emitInitField(*T, FieldToInit->Offset, E)) + return false; + } + // FIXME: Non-primitive case? + } else { + const Expr *Init = Inits[0]; + const FieldDecl *FToInit = nullptr; + if (const auto *ILE = dyn_cast<InitListExpr>(E)) + FToInit = ILE->getInitializedFieldInUnion(); + else + FToInit = cast<CXXParenListInitExpr>(E)->getInitializedFieldInUnion(); + + const Record::Field *FieldToInit = R->getField(FToInit); + if (std::optional<PrimType> T = classify(Init)) { + if (!initPrimitiveField(FieldToInit, Init, *T)) + return false; + } else { + if (!initCompositeField(FieldToInit, Init)) + return false; + } + } + return this->emitFinishInit(E); + } + + assert(!R->isUnion()); + unsigned InitIndex = 0; + for (const Expr *Init : Inits) { + // Skip unnamed bitfields. + while (InitIndex < R->getNumFields() && + R->getField(InitIndex)->Decl->isUnnamedBitField()) + ++InitIndex; + + if (std::optional<PrimType> T = classify(Init)) { + const Record::Field *FieldToInit = R->getField(InitIndex); + if (!initPrimitiveField(FieldToInit, Init, *T)) + return false; + ++InitIndex; + } else { + // Initializer for a direct base class. + if (const Record::Base *B = R->getBase(Init->getType())) { + if (!this->emitGetPtrBase(B->Offset, Init)) + return false; + + if (!this->visitInitializer(Init)) + return false; + + if (!this->emitFinishInitPop(E)) + return false; + // Base initializers don't increase InitIndex, since they don't count + // into the Record's fields. 
+ } else { + const Record::Field *FieldToInit = R->getField(InitIndex); + if (!initCompositeField(FieldToInit, Init)) + return false; + ++InitIndex; + } + } + } + return this->emitFinishInit(E); + } + + if (QT->isArrayType()) { + if (Inits.size() == 1 && QT == Inits[0]->getType()) + return this->delegate(Inits[0]); + + unsigned ElementIndex = 0; + for (const Expr *Init : Inits) { + if (const auto *EmbedS = + dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) { + PrimType TargetT = classifyPrim(Init->getType()); + + auto Eval = [&](const Expr *Init, unsigned ElemIndex) { + PrimType InitT = classifyPrim(Init->getType()); + if (!this->visit(Init)) + return false; + if (InitT != TargetT) { + if (!this->emitCast(InitT, TargetT, E)) + return false; + } + return this->emitInitElem(TargetT, ElemIndex, Init); + }; + if (!EmbedS->doForEachDataElement(Eval, ElementIndex)) + return false; + } else { + if (!this->visitArrayElemInit(ElementIndex, Init)) + return false; + ++ElementIndex; + } + } + + // Expand the filler expression. + // FIXME: This should go away. + if (ArrayFiller) { + const ConstantArrayType *CAT = + Ctx.getASTContext().getAsConstantArrayType(QT); + uint64_t NumElems = CAT->getZExtSize(); + + for (; ElementIndex != NumElems; ++ElementIndex) { + if (!this->visitArrayElemInit(ElementIndex, ArrayFiller)) + return false; + } + } + + return this->emitFinishInit(E); + } + + if (const auto *ComplexTy = QT->getAs<ComplexType>()) { + unsigned NumInits = Inits.size(); + + if (NumInits == 1) + return this->delegate(Inits[0]); + + QualType ElemQT = ComplexTy->getElementType(); + PrimType ElemT = classifyPrim(ElemQT); + if (NumInits == 0) { + // Zero-initialize both elements. + for (unsigned I = 0; I < 2; ++I) { + if (!this->visitZeroInitializer(ElemT, ElemQT, E)) + return false; + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + } else if (NumInits == 2) { + unsigned InitIndex = 0; + for (const Expr *Init : Inits) { + if (!this->visit(Init)) + return false; + + if (!this->emitInitElem(ElemT, InitIndex, E)) + return false; + ++InitIndex; + } + } + return true; + } + + if (const auto *VecT = QT->getAs<VectorType>()) { + unsigned NumVecElements = VecT->getNumElements(); + assert(NumVecElements >= Inits.size()); + + QualType ElemQT = VecT->getElementType(); + PrimType ElemT = classifyPrim(ElemQT); + + // All initializer elements. + unsigned InitIndex = 0; + for (const Expr *Init : Inits) { + if (!this->visit(Init)) + return false; + + // If the initializer is of vector type itself, we have to deconstruct + // that and initialize all the target fields from the initializer fields. + if (const auto *InitVecT = Init->getType()->getAs<VectorType>()) { + if (!this->emitCopyArray(ElemT, 0, InitIndex, + InitVecT->getNumElements(), E)) + return false; + InitIndex += InitVecT->getNumElements(); + } else { + if (!this->emitInitElem(ElemT, InitIndex, E)) + return false; + ++InitIndex; + } + } + + assert(InitIndex <= NumVecElements); + + // Fill the rest with zeroes. + for (; InitIndex != NumVecElements; ++InitIndex) { + if (!this->visitZeroInitializer(ElemT, ElemQT, E)) + return false; + if (!this->emitInitElem(ElemT, InitIndex, E)) + return false; + } + return true; + } + + return false; +} + +/// Pointer to the array(not the element!) must be on the stack when calling +/// this. +template <class Emitter> +bool Compiler<Emitter>::visitArrayElemInit(unsigned ElemIndex, + const Expr *Init) { + if (std::optional<PrimType> T = classify(Init->getType())) { + // Visit the primitive element like normal. 
+ if (!this->visit(Init)) + return false; + return this->emitInitElem(*T, ElemIndex, Init); + } + + // Advance the pointer currently on the stack to the given + // dimension. + if (!this->emitConstUint32(ElemIndex, Init)) + return false; + if (!this->emitArrayElemPtrUint32(Init)) + return false; + if (!this->visitInitializer(Init)) + return false; + return this->emitFinishInitPop(Init); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitInitListExpr(const InitListExpr *E) { + return this->visitInitList(E->inits(), E->getArrayFiller(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXParenListInitExpr( + const CXXParenListInitExpr *E) { + return this->visitInitList(E->getInitExprs(), E->getArrayFiller(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitSubstNonTypeTemplateParmExpr( + const SubstNonTypeTemplateParmExpr *E) { + return this->delegate(E->getReplacement()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitConstantExpr(const ConstantExpr *E) { + std::optional<PrimType> T = classify(E->getType()); + if (T && E->hasAPValueResult()) { + // Try to emit the APValue directly, without visiting the subexpr. + // This will only fail if we can't emit the APValue, so won't emit any + // diagnostics or any double values. + if (DiscardResult) + return true; + + if (this->visitAPValue(E->getAPValueResult(), *T, E)) + return true; + } + return this->delegate(E->getSubExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitEmbedExpr(const EmbedExpr *E) { + auto It = E->begin(); + return this->visit(*It); +} + +static CharUnits AlignOfType(QualType T, const ASTContext &ASTCtx, + UnaryExprOrTypeTrait Kind) { + bool AlignOfReturnsPreferred = + ASTCtx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7; + + // C++ [expr.alignof]p3: + // When alignof is applied to a reference type, the result is the + // alignment of the referenced type. + if (const auto *Ref = T->getAs<ReferenceType>()) + T = Ref->getPointeeType(); + + if (T.getQualifiers().hasUnaligned()) + return CharUnits::One(); + + // __alignof is defined to return the preferred alignment. + // Before 8, clang returned the preferred alignment for alignof and + // _Alignof as well. + if (Kind == UETT_PreferredAlignOf || AlignOfReturnsPreferred) + return ASTCtx.toCharUnitsFromBits(ASTCtx.getPreferredTypeAlign(T)); + + return ASTCtx.getTypeAlignInChars(T); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitUnaryExprOrTypeTraitExpr( + const UnaryExprOrTypeTraitExpr *E) { + UnaryExprOrTypeTrait Kind = E->getKind(); + const ASTContext &ASTCtx = Ctx.getASTContext(); + + if (Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) { + QualType ArgType = E->getTypeOfArgument(); + + // C++ [expr.sizeof]p2: "When applied to a reference or a reference type, + // the result is the size of the referenced type." 
+ if (const auto *Ref = ArgType->getAs<ReferenceType>()) + ArgType = Ref->getPointeeType(); + + CharUnits Size; + if (ArgType->isVoidType() || ArgType->isFunctionType()) + Size = CharUnits::One(); + else { + if (ArgType->isDependentType() || !ArgType->isConstantSizeType()) + return false; + + if (Kind == UETT_SizeOf) + Size = ASTCtx.getTypeSizeInChars(ArgType); + else + Size = ASTCtx.getTypeInfoDataSizeInChars(ArgType).Width; + } + + if (DiscardResult) + return true; + + return this->emitConst(Size.getQuantity(), E); + } + + if (Kind == UETT_AlignOf || Kind == UETT_PreferredAlignOf) { + CharUnits Size; + + if (E->isArgumentType()) { + QualType ArgType = E->getTypeOfArgument(); + + Size = AlignOfType(ArgType, ASTCtx, Kind); + } else { + // Argument is an expression, not a type. + const Expr *Arg = E->getArgumentExpr()->IgnoreParens(); + + // The kinds of expressions that we have special-case logic here for + // should be kept up to date with the special checks for those + // expressions in Sema. + + // alignof decl is always accepted, even if it doesn't make sense: we + // default to 1 in those cases. + if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) + Size = ASTCtx.getDeclAlign(DRE->getDecl(), + /*RefAsPointee*/ true); + else if (const auto *ME = dyn_cast<MemberExpr>(Arg)) + Size = ASTCtx.getDeclAlign(ME->getMemberDecl(), + /*RefAsPointee*/ true); + else + Size = AlignOfType(Arg->getType(), ASTCtx, Kind); + } + + if (DiscardResult) + return true; + + return this->emitConst(Size.getQuantity(), E); + } + + if (Kind == UETT_VectorElements) { + if (const auto *VT = E->getTypeOfArgument()->getAs<VectorType>()) + return this->emitConst(VT->getNumElements(), E); + assert(E->getTypeOfArgument()->isSizelessVectorType()); + return this->emitSizelessVectorElementSize(E); + } + + if (Kind == UETT_VecStep) { + if (const auto *VT = E->getTypeOfArgument()->getAs<VectorType>()) { + unsigned N = VT->getNumElements(); + + // The vec_step built-in functions that take a 3-component + // vector return 4. (OpenCL 1.1 spec 6.11.12) + if (N == 3) + N = 4; + + return this->emitConst(N, E); + } + return this->emitConst(1, E); + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitMemberExpr(const MemberExpr *E) { + // 'Base.Member' + const Expr *Base = E->getBase(); + const ValueDecl *Member = E->getMemberDecl(); + + if (DiscardResult) + return this->discard(Base); + + // MemberExprs are almost always lvalues, in which case we don't need to + // do the load. But sometimes they aren't. + const auto maybeLoadValue = [&]() -> bool { + if (E->isGLValue()) + return true; + if (std::optional<PrimType> T = classify(E)) + return this->emitLoadPop(*T, E); + return false; + }; + + if (const auto *VD = dyn_cast<VarDecl>(Member)) { + // I am almost confident in saying that a var decl must be static + // and therefore registered as a global variable. But this will probably + // turn out to be wrong some time in the future, as always. + if (auto GlobalIndex = P.getGlobal(VD)) + return this->emitGetPtrGlobal(*GlobalIndex, E) && maybeLoadValue(); + return false; + } + + if (!isa<FieldDecl>(Member)) + return this->discard(Base) && this->visitDeclRef(Member, E); + + if (Initializing) { + if (!this->delegate(Base)) + return false; + } else { + if (!this->visit(Base)) + return false; + } + + // Base above gives us a pointer on the stack. 
+ const auto *FD = cast<FieldDecl>(Member); + const RecordDecl *RD = FD->getParent(); + const Record *R = getRecord(RD); + if (!R) + return false; + const Record::Field *F = R->getField(FD); + // Leave a pointer to the field on the stack. + if (F->Decl->getType()->isReferenceType()) + return this->emitGetFieldPop(PT_Ptr, F->Offset, E) && maybeLoadValue(); + return this->emitGetPtrFieldPop(F->Offset, E) && maybeLoadValue(); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) { + // ArrayIndex might not be set if a ArrayInitIndexExpr is being evaluated + // stand-alone, e.g. via EvaluateAsInt(). + if (!ArrayIndex) + return false; + return this->emitConst(*ArrayIndex, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { + assert(Initializing); + assert(!DiscardResult); + + // We visit the common opaque expression here once so we have its value + // cached. + if (!this->discard(E->getCommonExpr())) + return false; + + // TODO: This compiles to quite a lot of bytecode if the array is larger. + // Investigate compiling this to a loop. + const Expr *SubExpr = E->getSubExpr(); + size_t Size = E->getArraySize().getZExtValue(); + + // So, every iteration, we execute an assignment here + // where the LHS is on the stack (the target array) + // and the RHS is our SubExpr. + for (size_t I = 0; I != Size; ++I) { + ArrayIndexScope<Emitter> IndexScope(this, I); + BlockScope<Emitter> BS(this); + + if (!this->visitArrayElemInit(I, SubExpr)) + return false; + if (!BS.destroyLocals()) + return false; + } + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitOpaqueValueExpr(const OpaqueValueExpr *E) { + const Expr *SourceExpr = E->getSourceExpr(); + if (!SourceExpr) + return false; + + if (Initializing) + return this->visitInitializer(SourceExpr); + + PrimType SubExprT = classify(SourceExpr).value_or(PT_Ptr); + if (auto It = OpaqueExprs.find(E); It != OpaqueExprs.end()) + return this->emitGetLocal(SubExprT, It->second, E); + + if (!this->visit(SourceExpr)) + return false; + + // At this point we either have the evaluated source expression or a pointer + // to an object on the stack. We want to create a local variable that stores + // this value. + unsigned LocalIndex = allocateLocalPrimitive(E, SubExprT, /*IsConst=*/true); + if (!this->emitSetLocal(SubExprT, LocalIndex, E)) + return false; + + // Here the local variable is created but the value is removed from the stack, + // so we put it back if the caller needs it. + if (!DiscardResult) { + if (!this->emitGetLocal(SubExprT, LocalIndex, E)) + return false; + } + + // This is cleaned up when the local variable is destroyed. + OpaqueExprs.insert({E, LocalIndex}); + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitAbstractConditionalOperator( + const AbstractConditionalOperator *E) { + const Expr *Condition = E->getCond(); + const Expr *TrueExpr = E->getTrueExpr(); + const Expr *FalseExpr = E->getFalseExpr(); + + LabelTy LabelEnd = this->getLabel(); // Label after the operator. + LabelTy LabelFalse = this->getLabel(); // Label for the false expr. 
+ + if (!this->visitBool(Condition)) + return false; + + if (!this->jumpFalse(LabelFalse)) + return false; + + if (!this->delegate(TrueExpr)) + return false; + if (!this->jump(LabelEnd)) + return false; + + this->emitLabel(LabelFalse); + + if (!this->delegate(FalseExpr)) + return false; + + this->fallthrough(LabelEnd); + this->emitLabel(LabelEnd); + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitStringLiteral(const StringLiteral *E) { + if (DiscardResult) + return true; + + if (!Initializing) { + unsigned StringIndex = P.createGlobalString(E); + return this->emitGetPtrGlobal(StringIndex, E); + } + + // We are initializing an array on the stack. + const ConstantArrayType *CAT = + Ctx.getASTContext().getAsConstantArrayType(E->getType()); + assert(CAT && "a string literal that's not a constant array?"); + + // If the initializer string is too long, a diagnostic has already been + // emitted. Read only the array length from the string literal. + unsigned ArraySize = CAT->getZExtSize(); + unsigned N = std::min(ArraySize, E->getLength()); + size_t CharWidth = E->getCharByteWidth(); + + for (unsigned I = 0; I != N; ++I) { + uint32_t CodeUnit = E->getCodeUnit(I); + + if (CharWidth == 1) { + this->emitConstSint8(CodeUnit, E); + this->emitInitElemSint8(I, E); + } else if (CharWidth == 2) { + this->emitConstUint16(CodeUnit, E); + this->emitInitElemUint16(I, E); + } else if (CharWidth == 4) { + this->emitConstUint32(CodeUnit, E); + this->emitInitElemUint32(I, E); + } else { + llvm_unreachable("unsupported character width"); + } + } + + // Fill up the rest of the char array with NUL bytes. + for (unsigned I = N; I != ArraySize; ++I) { + if (CharWidth == 1) { + this->emitConstSint8(0, E); + this->emitInitElemSint8(I, E); + } else if (CharWidth == 2) { + this->emitConstUint16(0, E); + this->emitInitElemUint16(I, E); + } else if (CharWidth == 4) { + this->emitConstUint32(0, E); + this->emitInitElemUint32(I, E); + } else { + llvm_unreachable("unsupported character width"); + } + } + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitObjCStringLiteral(const ObjCStringLiteral *E) { + return this->delegate(E->getString()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { + auto &A = Ctx.getASTContext(); + std::string Str; + A.getObjCEncodingForType(E->getEncodedType(), Str); + StringLiteral *SL = + StringLiteral::Create(A, Str, StringLiteralKind::Ordinary, + /*Pascal=*/false, E->getType(), E->getAtLoc()); + return this->delegate(SL); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitSYCLUniqueStableNameExpr( + const SYCLUniqueStableNameExpr *E) { + if (DiscardResult) + return true; + + assert(!Initializing); + + auto &A = Ctx.getASTContext(); + std::string ResultStr = E->ComputeName(A); + + QualType CharTy = A.CharTy.withConst(); + APInt Size(A.getTypeSize(A.getSizeType()), ResultStr.size() + 1); + QualType ArrayTy = A.getConstantArrayType(CharTy, Size, nullptr, + ArraySizeModifier::Normal, 0); + + StringLiteral *SL = + StringLiteral::Create(A, ResultStr, StringLiteralKind::Ordinary, + /*Pascal=*/false, ArrayTy, E->getLocation()); + + unsigned StringIndex = P.createGlobalString(SL); + return this->emitGetPtrGlobal(StringIndex, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCharacterLiteral(const CharacterLiteral *E) { + if (DiscardResult) + return true; + return this->emitConst(E->getValue(), E); +} + +template <class Emitter> +bool 
Compiler<Emitter>::VisitFloatCompoundAssignOperator( + const CompoundAssignOperator *E) { + + const Expr *LHS = E->getLHS(); + const Expr *RHS = E->getRHS(); + QualType LHSType = LHS->getType(); + QualType LHSComputationType = E->getComputationLHSType(); + QualType ResultType = E->getComputationResultType(); + std::optional<PrimType> LT = classify(LHSComputationType); + std::optional<PrimType> RT = classify(ResultType); + + assert(ResultType->isFloatingType()); + + if (!LT || !RT) + return false; + + PrimType LHST = classifyPrim(LHSType); + + // C++17 onwards require that we evaluate the RHS first. + // Compute RHS and save it in a temporary variable so we can + // load it again later. + if (!visit(RHS)) + return false; + + unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true); + if (!this->emitSetLocal(*RT, TempOffset, E)) + return false; + + // First, visit LHS. + if (!visit(LHS)) + return false; + if (!this->emitLoad(LHST, E)) + return false; + + // If necessary, convert LHS to its computation type. + if (!this->emitPrimCast(LHST, classifyPrim(LHSComputationType), + LHSComputationType, E)) + return false; + + // Now load RHS. + if (!this->emitGetLocal(*RT, TempOffset, E)) + return false; + + llvm::RoundingMode RM = getRoundingMode(E); + switch (E->getOpcode()) { + case BO_AddAssign: + if (!this->emitAddf(RM, E)) + return false; + break; + case BO_SubAssign: + if (!this->emitSubf(RM, E)) + return false; + break; + case BO_MulAssign: + if (!this->emitMulf(RM, E)) + return false; + break; + case BO_DivAssign: + if (!this->emitDivf(RM, E)) + return false; + break; + default: + return false; + } + + if (!this->emitPrimCast(classifyPrim(ResultType), LHST, LHS->getType(), E)) + return false; + + if (DiscardResult) + return this->emitStorePop(LHST, E); + return this->emitStore(LHST, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitPointerCompoundAssignOperator( + const CompoundAssignOperator *E) { + BinaryOperatorKind Op = E->getOpcode(); + const Expr *LHS = E->getLHS(); + const Expr *RHS = E->getRHS(); + std::optional<PrimType> LT = classify(LHS->getType()); + std::optional<PrimType> RT = classify(RHS->getType()); + + if (Op != BO_AddAssign && Op != BO_SubAssign) + return false; + + if (!LT || !RT) + return false; + + if (!visit(LHS)) + return false; + + if (!this->emitLoad(*LT, LHS)) + return false; + + if (!visit(RHS)) + return false; + + if (Op == BO_AddAssign) { + if (!this->emitAddOffset(*RT, E)) + return false; + } else { + if (!this->emitSubOffset(*RT, E)) + return false; + } + + if (DiscardResult) + return this->emitStorePopPtr(E); + return this->emitStorePtr(E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCompoundAssignOperator( + const CompoundAssignOperator *E) { + + const Expr *LHS = E->getLHS(); + const Expr *RHS = E->getRHS(); + std::optional<PrimType> LHSComputationT = + classify(E->getComputationLHSType()); + std::optional<PrimType> LT = classify(LHS->getType()); + std::optional<PrimType> RT = classify(RHS->getType()); + std::optional<PrimType> ResultT = classify(E->getType()); + + if (!Ctx.getLangOpts().CPlusPlus14) + return this->visit(RHS) && this->visit(LHS) && this->emitError(E); + + if (!LT || !RT || !ResultT || !LHSComputationT) + return false; + + // Handle floating point operations separately here, since they + // require special care. 
+ + if (ResultT == PT_Float || RT == PT_Float) + return VisitFloatCompoundAssignOperator(E); + + if (E->getType()->isPointerType()) + return VisitPointerCompoundAssignOperator(E); + + assert(!E->getType()->isPointerType() && "Handled above"); + assert(!E->getType()->isFloatingType() && "Handled above"); + + // C++17 onwards require that we evaluate the RHS first. + // Compute RHS and save it in a temporary variable so we can + // load it again later. + // FIXME: Compound assignments are unsequenced in C, so we might + // have to figure out how to reject them. + if (!visit(RHS)) + return false; + + unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true); + + if (!this->emitSetLocal(*RT, TempOffset, E)) + return false; + + // Get LHS pointer, load its value and cast it to the + // computation type if necessary. + if (!visit(LHS)) + return false; + if (!this->emitLoad(*LT, E)) + return false; + if (LT != LHSComputationT) { + if (!this->emitCast(*LT, *LHSComputationT, E)) + return false; + } + + // Get the RHS value on the stack. + if (!this->emitGetLocal(*RT, TempOffset, E)) + return false; + + // Perform operation. + switch (E->getOpcode()) { + case BO_AddAssign: + if (!this->emitAdd(*LHSComputationT, E)) + return false; + break; + case BO_SubAssign: + if (!this->emitSub(*LHSComputationT, E)) + return false; + break; + case BO_MulAssign: + if (!this->emitMul(*LHSComputationT, E)) + return false; + break; + case BO_DivAssign: + if (!this->emitDiv(*LHSComputationT, E)) + return false; + break; + case BO_RemAssign: + if (!this->emitRem(*LHSComputationT, E)) + return false; + break; + case BO_ShlAssign: + if (!this->emitShl(*LHSComputationT, *RT, E)) + return false; + break; + case BO_ShrAssign: + if (!this->emitShr(*LHSComputationT, *RT, E)) + return false; + break; + case BO_AndAssign: + if (!this->emitBitAnd(*LHSComputationT, E)) + return false; + break; + case BO_XorAssign: + if (!this->emitBitXor(*LHSComputationT, E)) + return false; + break; + case BO_OrAssign: + if (!this->emitBitOr(*LHSComputationT, E)) + return false; + break; + default: + llvm_unreachable("Unimplemented compound assign operator"); + } + + // And now cast from LHSComputationT to ResultT. + if (ResultT != LHSComputationT) { + if (!this->emitCast(*LHSComputationT, *ResultT, E)) + return false; + } + + // And store the result in LHS. + if (DiscardResult) { + if (LHS->refersToBitField()) + return this->emitStoreBitFieldPop(*ResultT, E); + return this->emitStorePop(*ResultT, E); + } + if (LHS->refersToBitField()) + return this->emitStoreBitField(*ResultT, E); + return this->emitStore(*ResultT, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitExprWithCleanups(const ExprWithCleanups *E) { + LocalScope<Emitter> ES(this); + const Expr *SubExpr = E->getSubExpr(); + + assert(E->getNumObjects() == 0 && "TODO: Implement cleanups"); + + return this->delegate(SubExpr) && ES.destroyLocals(); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitMaterializeTemporaryExpr( + const MaterializeTemporaryExpr *E) { + const Expr *SubExpr = E->getSubExpr(); + + if (Initializing) { + // We already have a value, just initialize that. + return this->delegate(SubExpr); + } + // If we don't end up using the materialized temporary anyway, don't + // bother creating it. + if (DiscardResult) + return this->discard(SubExpr); + + // When we're initializing a global variable *or* the storage duration of + // the temporary is explicitly static, create a global variable. 
+ std::optional<PrimType> SubExprT = classify(SubExpr); + bool IsStatic = E->getStorageDuration() == SD_Static; + if (GlobalDecl || IsStatic) { + std::optional<unsigned> GlobalIndex = P.createGlobal(E); + if (!GlobalIndex) + return false; + + const LifetimeExtendedTemporaryDecl *TempDecl = + E->getLifetimeExtendedTemporaryDecl(); + if (IsStatic) + assert(TempDecl); + + if (SubExprT) { + if (!this->visit(SubExpr)) + return false; + if (IsStatic) { + if (!this->emitInitGlobalTemp(*SubExprT, *GlobalIndex, TempDecl, E)) + return false; + } else { + if (!this->emitInitGlobal(*SubExprT, *GlobalIndex, E)) + return false; + } + return this->emitGetPtrGlobal(*GlobalIndex, E); + } + + // Non-primitive values. + if (!this->emitGetPtrGlobal(*GlobalIndex, E)) + return false; + if (!this->visitInitializer(SubExpr)) + return false; + if (IsStatic) + return this->emitInitGlobalTempComp(TempDecl, E); + return true; + } + + // For everyhing else, use local variables. + if (SubExprT) { + unsigned LocalIndex = allocateLocalPrimitive( + SubExpr, *SubExprT, /*IsConst=*/true, /*IsExtended=*/true); + if (!this->visit(SubExpr)) + return false; + if (!this->emitSetLocal(*SubExprT, LocalIndex, E)) + return false; + return this->emitGetPtrLocal(LocalIndex, E); + } else { + const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments(); + if (std::optional<unsigned> LocalIndex = + allocateLocal(Inner, E->getExtendingDecl())) { + InitLinkScope<Emitter> ILS(this, InitLink::Temp(*LocalIndex)); + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + return this->visitInitializer(SubExpr); + } + } + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXBindTemporaryExpr( + const CXXBindTemporaryExpr *E) { + return this->delegate(E->getSubExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { + const Expr *Init = E->getInitializer(); + if (Initializing) { + // We already have a value, just initialize that. + return this->visitInitializer(Init) && this->emitFinishInit(E); + } + + std::optional<PrimType> T = classify(E->getType()); + if (E->isFileScope()) { + // Avoid creating a variable if this is a primitive RValue anyway. + if (T && !E->isLValue()) + return this->delegate(Init); + + if (std::optional<unsigned> GlobalIndex = P.createGlobal(E)) { + if (!this->emitGetPtrGlobal(*GlobalIndex, E)) + return false; + + if (T) { + if (!this->visit(Init)) + return false; + return this->emitInitGlobal(*T, *GlobalIndex, E); + } + + return this->visitInitializer(Init) && this->emitFinishInit(E); + } + + return false; + } + + // Otherwise, use a local variable. + if (T && !E->isLValue()) { + // For primitive types, we just visit the initializer. 
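// Illustrative example (not part of this change): e.g. the C compound literal in
//   int X = (int){12};
// is a primitive rvalue, so no dedicated local variable is needed for it.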
+ return this->delegate(Init); + } else { + unsigned LocalIndex; + + if (T) + LocalIndex = this->allocateLocalPrimitive(Init, *T, false, false); + else if (std::optional<unsigned> MaybeIndex = this->allocateLocal(Init)) + LocalIndex = *MaybeIndex; + else + return false; + + if (!this->emitGetPtrLocal(LocalIndex, E)) + return false; + + if (T) { + if (!this->visit(Init)) { + return false; + } + return this->emitInit(*T, E); + } else { + if (!this->visitInitializer(Init) || !this->emitFinishInit(E)) + return false; + } + + if (DiscardResult) + return this->emitPopPtr(E); + return true; + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitTypeTraitExpr(const TypeTraitExpr *E) { + if (DiscardResult) + return true; + if (E->getType()->isBooleanType()) + return this->emitConstBool(E->getValue(), E); + return this->emitConst(E->getValue(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) { + if (DiscardResult) + return true; + return this->emitConst(E->getValue(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitLambdaExpr(const LambdaExpr *E) { + if (DiscardResult) + return true; + + assert(Initializing); + const Record *R = P.getOrCreateRecord(E->getLambdaClass()); + + auto *CaptureInitIt = E->capture_init_begin(); + // Initialize all fields (which represent lambda captures) of the + // record with their initializers. + for (const Record::Field &F : R->fields()) { + const Expr *Init = *CaptureInitIt; + ++CaptureInitIt; + + if (!Init) + continue; + + if (std::optional<PrimType> T = classify(Init)) { + if (!this->visit(Init)) + return false; + + if (!this->emitInitField(*T, F.Offset, E)) + return false; + } else { + if (!this->emitGetPtrField(F.Offset, E)) + return false; + + if (!this->visitInitializer(Init)) + return false; + + if (!this->emitPopPtr(E)) + return false; + } + } + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitPredefinedExpr(const PredefinedExpr *E) { + if (DiscardResult) + return true; + + return this->delegate(E->getFunctionName()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXThrowExpr(const CXXThrowExpr *E) { + if (E->getSubExpr() && !this->discard(E->getSubExpr())) + return false; + + return this->emitInvalid(E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXReinterpretCastExpr( + const CXXReinterpretCastExpr *E) { + if (!this->discard(E->getSubExpr())) + return false; + + return this->emitInvalidCast(CastKind::Reinterpret, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) { + assert(E->getType()->isBooleanType()); + + if (DiscardResult) + return true; + return this->emitConstBool(E->getValue(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) { + QualType T = E->getType(); + assert(!classify(T)); + + if (T->isRecordType()) { + const CXXConstructorDecl *Ctor = E->getConstructor(); + + // Trivial copy/move constructor. Avoid copy. + if (Ctor->isDefaulted() && Ctor->isCopyOrMoveConstructor() && + Ctor->isTrivial() && + E->getArg(0)->isTemporaryObject(Ctx.getASTContext(), + T->getAsCXXRecordDecl())) + return this->visitInitializer(E->getArg(0)); + + // If we're discarding a construct expression, we still need + // to allocate a variable and call the constructor and destructor. 
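// Illustrative example (not part of this change): a construct expression used
// as a discarded-value statement still needs storage plus ctor/dtor calls:
//   struct S { constexpr S(int) {} };
//   constexpr int f() { S(1); return 0; }   // 'S(1)' is discarded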
+ if (DiscardResult) { + if (Ctor->isTrivial()) + return true; + assert(!Initializing); + std::optional<unsigned> LocalIndex = allocateLocal(E); + + if (!LocalIndex) + return false; + + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + + // Zero initialization. + if (E->requiresZeroInitialization()) { + const Record *R = getRecord(E->getType()); + + if (!this->visitZeroRecordInitializer(R, E)) + return false; + + // If the constructor is trivial anyway, we're done. + if (Ctor->isTrivial()) + return true; + } + + const Function *Func = getFunction(Ctor); + + if (!Func) + return false; + + assert(Func->hasThisPointer()); + assert(!Func->hasRVO()); + + // The This pointer is already on the stack because this is an initializer, + // but we need to dup() so the call() below has its own copy. + if (!this->emitDupPtr(E)) + return false; + + // Constructor arguments. + for (const auto *Arg : E->arguments()) { + if (!this->visit(Arg)) + return false; + } + + if (Func->isVariadic()) { + uint32_t VarArgSize = 0; + unsigned NumParams = Func->getNumWrittenParams(); + for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I) { + VarArgSize += + align(primSize(classify(E->getArg(I)->getType()).value_or(PT_Ptr))); + } + if (!this->emitCallVar(Func, VarArgSize, E)) + return false; + } else { + if (!this->emitCall(Func, 0, E)) + return false; + } + + // Immediately call the destructor if we have to. + if (DiscardResult) { + if (!this->emitRecordDestruction(getRecord(E->getType()))) + return false; + if (!this->emitPopPtr(E)) + return false; + } + return true; + } + + if (T->isArrayType()) { + const ConstantArrayType *CAT = + Ctx.getASTContext().getAsConstantArrayType(E->getType()); + if (!CAT) + return false; + + size_t NumElems = CAT->getZExtSize(); + const Function *Func = getFunction(E->getConstructor()); + if (!Func || !Func->isConstexpr()) + return false; + + // FIXME(perf): We're calling the constructor once per array element here, + // in the old intepreter we had a special-case for trivial constructors. + for (size_t I = 0; I != NumElems; ++I) { + if (!this->emitConstUint64(I, E)) + return false; + if (!this->emitArrayElemPtrUint64(E)) + return false; + + // Constructor arguments. + for (const auto *Arg : E->arguments()) { + if (!this->visit(Arg)) + return false; + } + + if (!this->emitCall(Func, 0, E)) + return false; + } + return true; + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitSourceLocExpr(const SourceLocExpr *E) { + if (DiscardResult) + return true; + + const APValue Val = + E->EvaluateInContext(Ctx.getASTContext(), SourceLocDefaultExpr); + + // Things like __builtin_LINE(). + if (E->getType()->isIntegerType()) { + assert(Val.isInt()); + const APSInt &I = Val.getInt(); + return this->emitConst(I, E); + } + // Otherwise, the APValue is an LValue, with only one element. + // Theoretically, we don't need the APValue at all of course. + assert(E->getType()->isPointerType()); + assert(Val.isLValue()); + const APValue::LValueBase &Base = Val.getLValueBase(); + if (const Expr *LValueExpr = Base.dyn_cast<const Expr *>()) + return this->visit(LValueExpr); + + // Otherwise, we have a decl (which is the case for + // __builtin_source_location). 
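// Illustrative example (not part of this change): the decl case is what
// std::source_location::current() boils down to; the builtin evaluates to a
// pointer to an UnnamedGlobalConstantDecl holding its fields:
//   constexpr auto SL = std::source_location::current();   // needs <source_location>
//   static_assert(SL.line() > 0);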
+ assert(Base.is<const ValueDecl *>()); + assert(Val.getLValuePath().size() == 0); + const auto *BaseDecl = Base.dyn_cast<const ValueDecl *>(); + assert(BaseDecl); + + auto *UGCD = cast<UnnamedGlobalConstantDecl>(BaseDecl); + + std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(UGCD); + if (!GlobalIndex) + return false; + + if (!this->emitGetPtrGlobal(*GlobalIndex, E)) + return false; + + const Record *R = getRecord(E->getType()); + const APValue &V = UGCD->getValue(); + for (unsigned I = 0, N = R->getNumFields(); I != N; ++I) { + const Record::Field *F = R->getField(I); + const APValue &FieldValue = V.getStructField(I); + + PrimType FieldT = classifyPrim(F->Decl->getType()); + + if (!this->visitAPValue(FieldValue, FieldT, E)) + return false; + if (!this->emitInitField(FieldT, F->Offset, E)) + return false; + } + + // Leave the pointer to the global on the stack. + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitOffsetOfExpr(const OffsetOfExpr *E) { + unsigned N = E->getNumComponents(); + if (N == 0) + return false; + + for (unsigned I = 0; I != N; ++I) { + const OffsetOfNode &Node = E->getComponent(I); + if (Node.getKind() == OffsetOfNode::Array) { + const Expr *ArrayIndexExpr = E->getIndexExpr(Node.getArrayExprIndex()); + PrimType IndexT = classifyPrim(ArrayIndexExpr->getType()); + + if (DiscardResult) { + if (!this->discard(ArrayIndexExpr)) + return false; + continue; + } + + if (!this->visit(ArrayIndexExpr)) + return false; + // Cast to Sint64. + if (IndexT != PT_Sint64) { + if (!this->emitCast(IndexT, PT_Sint64, E)) + return false; + } + } + } + + if (DiscardResult) + return true; + + PrimType T = classifyPrim(E->getType()); + return this->emitOffsetOf(T, E, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXScalarValueInitExpr( + const CXXScalarValueInitExpr *E) { + QualType Ty = E->getType(); + + if (DiscardResult || Ty->isVoidType()) + return true; + + if (std::optional<PrimType> T = classify(Ty)) + return this->visitZeroInitializer(*T, Ty, E); + + if (const auto *CT = Ty->getAs<ComplexType>()) { + if (!Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(E); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + + // Initialize both fields to 0. + QualType ElemQT = CT->getElementType(); + PrimType ElemT = classifyPrim(ElemQT); + + for (unsigned I = 0; I != 2; ++I) { + if (!this->visitZeroInitializer(ElemT, ElemQT, E)) + return false; + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + return true; + } + + if (const auto *VT = Ty->getAs<VectorType>()) { + // FIXME: Code duplication with the _Complex case above. + if (!Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(E); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + + // Initialize all fields to 0. 
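// Sketch for illustration only (not part of this change): value-initialization
// of a vector type zero-fills every element, e.g. with the GCC vector extension:
//   typedef int int4 __attribute__((vector_size(16)));
//   constexpr int4 V = int4();   // all four lanes are 0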
+ QualType ElemQT = VT->getElementType(); + PrimType ElemT = classifyPrim(ElemQT); + + for (unsigned I = 0, N = VT->getNumElements(); I != N; ++I) { + if (!this->visitZeroInitializer(ElemT, ElemQT, E)) + return false; + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + return true; + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitSizeOfPackExpr(const SizeOfPackExpr *E) { + return this->emitConst(E->getPackLength(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitGenericSelectionExpr( + const GenericSelectionExpr *E) { + return this->delegate(E->getResultExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitChooseExpr(const ChooseExpr *E) { + return this->delegate(E->getChosenSubExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { + if (DiscardResult) + return true; + + return this->emitConst(E->getValue(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXInheritedCtorInitExpr( + const CXXInheritedCtorInitExpr *E) { + const CXXConstructorDecl *Ctor = E->getConstructor(); + assert(!Ctor->isTrivial() && + "Trivial CXXInheritedCtorInitExpr, implement. (possible?)"); + const Function *F = this->getFunction(Ctor); + assert(F); + assert(!F->hasRVO()); + assert(F->hasThisPointer()); + + if (!this->emitDupPtr(SourceInfo{})) + return false; + + // Forward all arguments of the current function (which should be a + // constructor itself) to the inherited ctor. + // This is necessary because the calling code has pushed the pointer + // of the correct base for us already, but the arguments need + // to come after. + unsigned Offset = align(primSize(PT_Ptr)); // instance pointer. + for (const ParmVarDecl *PD : Ctor->parameters()) { + PrimType PT = this->classify(PD->getType()).value_or(PT_Ptr); + + if (!this->emitGetParam(PT, Offset, E)) + return false; + Offset += align(primSize(PT)); + } + + return this->emitCall(F, 0, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) { + assert(classifyPrim(E->getType()) == PT_Ptr); + const Expr *Init = E->getInitializer(); + QualType ElementType = E->getAllocatedType(); + std::optional<PrimType> ElemT = classify(ElementType); + unsigned PlacementArgs = E->getNumPlacementArgs(); + bool IsNoThrow = false; + + // FIXME: Better diagnostic. diag::note_constexpr_new_placement + if (PlacementArgs != 0) { + // The only new-placement list we support is of the form (std::nothrow). + // + // FIXME: There is no restriction on this, but it's not clear that any + // other form makes any sense. We get here for cases such as: + // + // new (std::align_val_t{N}) X(int) + // + // (which should presumably be valid only if N is a multiple of + // alignof(int), and in any case can't be deallocated unless N is + // alignof(X) and X has new-extended alignment). + if (PlacementArgs != 1 || !E->getPlacementArg(0)->getType()->isNothrowT()) + return this->emitInvalid(E); + + if (!this->discard(E->getPlacementArg(0))) + return false; + IsNoThrow = true; + } + + const Descriptor *Desc; + if (ElemT) { + if (E->isArray()) + Desc = nullptr; // We're not going to use it in this case. + else + Desc = P.createDescriptor(E, *ElemT, Descriptor::InlineDescMD, + /*IsConst=*/false, /*IsTemporary=*/false, + /*IsMutable=*/false); + } else { + Desc = P.createDescriptor( + E, ElementType.getTypePtr(), + E->isArray() ? 
std::nullopt : Descriptor::InlineDescMD, + /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false, Init); + } + + if (E->isArray()) { + std::optional<const Expr *> ArraySizeExpr = E->getArraySize(); + if (!ArraySizeExpr) + return false; + + const Expr *Stripped = *ArraySizeExpr; + for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Stripped); + Stripped = ICE->getSubExpr()) + if (ICE->getCastKind() != CK_NoOp && + ICE->getCastKind() != CK_IntegralCast) + break; + + PrimType SizeT = classifyPrim(Stripped->getType()); + + if (!this->visit(Stripped)) + return false; + + if (ElemT) { + // N primitive elements. + if (!this->emitAllocN(SizeT, *ElemT, E, IsNoThrow, E)) + return false; + } else { + // N Composite elements. + if (!this->emitAllocCN(SizeT, Desc, IsNoThrow, E)) + return false; + } + + if (Init && !this->visitInitializer(Init)) + return false; + + } else { + // Allocate just one element. + if (!this->emitAlloc(Desc, E)) + return false; + + if (Init) { + if (ElemT) { + if (!this->visit(Init)) + return false; + + if (!this->emitInit(*ElemT, E)) + return false; + } else { + // Composite. + if (!this->visitInitializer(Init)) + return false; + } + } + } + + if (DiscardResult) + return this->emitPopPtr(E); + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXDeleteExpr(const CXXDeleteExpr *E) { + const Expr *Arg = E->getArgument(); + + // Arg must be an lvalue. + if (!this->visit(Arg)) + return false; + + return this->emitFree(E->isArrayForm(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitExpressionTraitExpr(const ExpressionTraitExpr *E) { + assert(Ctx.getLangOpts().CPlusPlus); + return this->emitConstBool(E->getValue(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXUuidofExpr(const CXXUuidofExpr *E) { + if (DiscardResult) + return true; + assert(!Initializing); + + const MSGuidDecl *GuidDecl = E->getGuidDecl(); + const RecordDecl *RD = GuidDecl->getType()->getAsRecordDecl(); + assert(RD); + // If the definiton of the result type is incomplete, just return a dummy. + // If (and when) that is read from, we will fail, but not now. 
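// Sketch for illustration only (not part of this change): __uuidof is an MS
// extension; the dummy path below covers uses that only form the address while
// struct _GUID is still incomplete, e.g. (assuming -fms-extensions):
//   struct __declspec(uuid("01234567-89ab-cdef-0123-456789abcdef")) S;
//   constexpr const _GUID *P = &__uuidof(S);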
+ if (!RD->isCompleteDefinition()) { + if (std::optional<unsigned> I = P.getOrCreateDummy(GuidDecl)) + return this->emitGetPtrGlobal(*I, E); + return false; + } + + std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(GuidDecl); + if (!GlobalIndex) + return false; + if (!this->emitGetPtrGlobal(*GlobalIndex, E)) + return false; + + assert(this->getRecord(E->getType())); + + const APValue &V = GuidDecl->getAsAPValue(); + if (V.getKind() == APValue::None) + return true; + + assert(V.isStruct()); + assert(V.getStructNumBases() == 0); + if (!this->visitAPValueInitializer(V, E)) + return false; + + return this->emitFinishInit(E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitRequiresExpr(const RequiresExpr *E) { + assert(classifyPrim(E->getType()) == PT_Bool); + if (DiscardResult) + return true; + return this->emitConstBool(E->isSatisfied(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitConceptSpecializationExpr( + const ConceptSpecializationExpr *E) { + assert(classifyPrim(E->getType()) == PT_Bool); + if (DiscardResult) + return true; + return this->emitConstBool(E->isSatisfied(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXRewrittenBinaryOperator( + const CXXRewrittenBinaryOperator *E) { + return this->delegate(E->getSemanticForm()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitPseudoObjectExpr(const PseudoObjectExpr *E) { + + for (const Expr *SemE : E->semantics()) { + if (auto *OVE = dyn_cast<OpaqueValueExpr>(SemE)) { + if (SemE == E->getResultExpr()) + return false; + + if (OVE->isUnique()) + continue; + + if (!this->discard(OVE)) + return false; + } else if (SemE == E->getResultExpr()) { + if (!this->delegate(SemE)) + return false; + } else { + if (!this->discard(SemE)) + return false; + } + } + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitPackIndexingExpr(const PackIndexingExpr *E) { + return this->delegate(E->getSelectedExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitRecoveryExpr(const RecoveryExpr *E) { + return this->emitError(E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitAddrLabelExpr(const AddrLabelExpr *E) { + assert(E->getType()->isVoidPointerType()); + + unsigned Offset = allocateLocalPrimitive( + E->getLabel(), PT_Ptr, /*IsConst=*/true, /*IsExtended=*/false); + + return this->emitGetLocal(PT_Ptr, Offset, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitConvertVectorExpr(const ConvertVectorExpr *E) { + assert(Initializing); + const auto *VT = E->getType()->castAs<VectorType>(); + QualType ElemType = VT->getElementType(); + PrimType ElemT = classifyPrim(ElemType); + const Expr *Src = E->getSrcExpr(); + PrimType SrcElemT = + classifyPrim(Src->getType()->castAs<VectorType>()->getElementType()); + + unsigned SrcOffset = this->allocateLocalPrimitive(Src, PT_Ptr, true, false); + if (!this->visit(Src)) + return false; + if (!this->emitSetLocal(PT_Ptr, SrcOffset, E)) + return false; + + for (unsigned I = 0; I != VT->getNumElements(); ++I) { + if (!this->emitGetLocal(PT_Ptr, SrcOffset, E)) + return false; + if (!this->emitArrayElemPop(SrcElemT, I, E)) + return false; + if (SrcElemT != ElemT) { + if (!this->emitPrimCast(SrcElemT, ElemT, ElemType, E)) + return false; + } + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) { + assert(Initializing); + assert(E->getNumSubExprs() > 2); + + const Expr 
*Vecs[] = {E->getExpr(0), E->getExpr(1)}; + const VectorType *VT = Vecs[0]->getType()->castAs<VectorType>(); + PrimType ElemT = classifyPrim(VT->getElementType()); + unsigned NumInputElems = VT->getNumElements(); + unsigned NumOutputElems = E->getNumSubExprs() - 2; + assert(NumOutputElems > 0); + + // Save both input vectors to a local variable. + unsigned VectorOffsets[2]; + for (unsigned I = 0; I != 2; ++I) { + VectorOffsets[I] = this->allocateLocalPrimitive( + Vecs[I], PT_Ptr, /*IsConst=*/true, /*IsExtended=*/false); + if (!this->visit(Vecs[I])) + return false; + if (!this->emitSetLocal(PT_Ptr, VectorOffsets[I], E)) + return false; + } + for (unsigned I = 0; I != NumOutputElems; ++I) { + APSInt ShuffleIndex = E->getShuffleMaskIdx(Ctx.getASTContext(), I); + if (ShuffleIndex == -1) + return this->emitInvalid(E); // FIXME: Better diagnostic. + + assert(ShuffleIndex < (NumInputElems * 2)); + if (!this->emitGetLocal(PT_Ptr, + VectorOffsets[ShuffleIndex >= NumInputElems], E)) + return false; + unsigned InputVectorIndex = ShuffleIndex.getZExtValue() % NumInputElems; + if (!this->emitArrayElemPop(ElemT, InputVectorIndex, E)) + return false; + + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitExtVectorElementExpr( + const ExtVectorElementExpr *E) { + const Expr *Base = E->getBase(); + assert( + Base->getType()->isVectorType() || + Base->getType()->getAs<PointerType>()->getPointeeType()->isVectorType()); + + SmallVector<uint32_t, 4> Indices; + E->getEncodedElementAccess(Indices); + + if (Indices.size() == 1) { + if (!this->visit(Base)) + return false; + + if (E->isGLValue()) { + if (!this->emitConstUint32(Indices[0], E)) + return false; + return this->emitArrayElemPtrPop(PT_Uint32, E); + } + // Else, also load the value. + return this->emitArrayElemPop(classifyPrim(E->getType()), Indices[0], E); + } + + // Create a local variable for the base. + unsigned BaseOffset = allocateLocalPrimitive(Base, PT_Ptr, /*IsConst=*/true, + /*IsExtended=*/false); + if (!this->visit(Base)) + return false; + if (!this->emitSetLocal(PT_Ptr, BaseOffset, E)) + return false; + + // Now the vector variable for the return value. + if (!Initializing) { + std::optional<unsigned> ResultIndex; + ResultIndex = allocateLocal(E); + if (!ResultIndex) + return false; + if (!this->emitGetPtrLocal(*ResultIndex, E)) + return false; + } + + assert(Indices.size() == E->getType()->getAs<VectorType>()->getNumElements()); + + PrimType ElemT = + classifyPrim(E->getType()->getAs<VectorType>()->getElementType()); + uint32_t DstIndex = 0; + for (uint32_t I : Indices) { + if (!this->emitGetLocal(PT_Ptr, BaseOffset, E)) + return false; + if (!this->emitArrayElemPop(ElemT, I, E)) + return false; + if (!this->emitInitElem(ElemT, DstIndex, E)) + return false; + ++DstIndex; + } + + // Leave the result pointer on the stack. 
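// Illustrative example (not part of this change): a multi-element swizzle that
// exercises the element-copy loop above, assuming ext_vector_type support:
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   constexpr float4 V = {1, 2, 3, 4};
//   constexpr float4 W = V.wzyx;   // Indices = {3, 2, 1, 0}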
+ assert(!DiscardResult); + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) { + if (!E->isExpressibleAsConstantInitializer()) + return this->emitInvalid(E); + + return this->delegate(E->getSubExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXStdInitializerListExpr( + const CXXStdInitializerListExpr *E) { + const Expr *SubExpr = E->getSubExpr(); + const ConstantArrayType *ArrayType = + Ctx.getASTContext().getAsConstantArrayType(SubExpr->getType()); + const Record *R = getRecord(E->getType()); + assert(Initializing); + assert(SubExpr->isGLValue()); + + if (!this->visit(SubExpr)) + return false; + if (!this->emitInitFieldPtr(R->getField(0u)->Offset, E)) + return false; + + PrimType SecondFieldT = classifyPrim(R->getField(1u)->Decl->getType()); + if (isIntegralType(SecondFieldT)) { + if (!this->emitConst(static_cast<APSInt>(ArrayType->getSize()), + SecondFieldT, E)) + return false; + return this->emitInitField(SecondFieldT, R->getField(1u)->Offset, E); + } + assert(SecondFieldT == PT_Ptr); + + if (!this->emitGetFieldPtr(R->getField(0u)->Offset, E)) + return false; + if (!this->emitConst(static_cast<APSInt>(ArrayType->getSize()), PT_Uint64, E)) + return false; + if (!this->emitArrayElemPtrPop(PT_Uint64, E)) + return false; + return this->emitInitFieldPtr(R->getField(1u)->Offset, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitStmtExpr(const StmtExpr *E) { + BlockScope<Emitter> BS(this); + StmtExprScope<Emitter> SS(this); + + const CompoundStmt *CS = E->getSubStmt(); + const Stmt *Result = CS->getStmtExprResult(); + for (const Stmt *S : CS->body()) { + if (S != Result) { + if (!this->visitStmt(S)) + return false; + continue; + } + + assert(S == Result); + if (const Expr *ResultExpr = dyn_cast<Expr>(S)) { + if (DiscardResult) + return this->discard(ResultExpr); + return this->delegate(ResultExpr); + } + + return this->visitStmt(S); + } + + return BS.destroyLocals(); +} + +template <class Emitter> bool Compiler<Emitter>::discard(const Expr *E) { + OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true, + /*NewInitializing=*/false); + return this->Visit(E); +} + +template <class Emitter> bool Compiler<Emitter>::delegate(const Expr *E) { + if (E->containsErrors()) + return this->emitError(E); + + // We're basically doing: + // OptionScope<Emitter> Scope(this, DicardResult, Initializing); + // but that's unnecessary of course. + return this->Visit(E); +} + +template <class Emitter> bool Compiler<Emitter>::visit(const Expr *E) { + if (E->getType().isNull()) + return false; + + if (E->getType()->isVoidType()) + return this->discard(E); + + // Create local variable to hold the return value. + if (!E->isGLValue() && !E->getType()->isAnyComplexType() && + !classify(E->getType())) { + std::optional<unsigned> LocalIndex = allocateLocal(E); + if (!LocalIndex) + return false; + + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + return this->visitInitializer(E); + } + + // Otherwise,we have a primitive return value, produce the value directly + // and push it on the stack. 
+ OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false, + /*NewInitializing=*/false); + return this->Visit(E); +} + +template <class Emitter> +bool Compiler<Emitter>::visitInitializer(const Expr *E) { + assert(!classify(E->getType())); + + if (E->containsErrors()) + return this->emitError(E); + + OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false, + /*NewInitializing=*/true); + return this->Visit(E); +} + +template <class Emitter> bool Compiler<Emitter>::visitBool(const Expr *E) { + std::optional<PrimType> T = classify(E->getType()); + if (!T) { + // Convert complex values to bool. + if (E->getType()->isAnyComplexType()) { + if (!this->visit(E)) + return false; + return this->emitComplexBoolCast(E); + } + return false; + } + + if (!this->visit(E)) + return false; + + if (T == PT_Bool) + return true; + + // Convert pointers to bool. + if (T == PT_Ptr || T == PT_FnPtr) { + if (!this->emitNull(*T, nullptr, E)) + return false; + return this->emitNE(*T, E); + } + + // Or Floats. + if (T == PT_Float) + return this->emitCastFloatingIntegralBool(E); + + // Or anything else we can. + return this->emitCast(*T, PT_Bool, E); +} + +template <class Emitter> +bool Compiler<Emitter>::visitZeroInitializer(PrimType T, QualType QT, + const Expr *E) { + switch (T) { + case PT_Bool: + return this->emitZeroBool(E); + case PT_Sint8: + return this->emitZeroSint8(E); + case PT_Uint8: + return this->emitZeroUint8(E); + case PT_Sint16: + return this->emitZeroSint16(E); + case PT_Uint16: + return this->emitZeroUint16(E); + case PT_Sint32: + return this->emitZeroSint32(E); + case PT_Uint32: + return this->emitZeroUint32(E); + case PT_Sint64: + return this->emitZeroSint64(E); + case PT_Uint64: + return this->emitZeroUint64(E); + case PT_IntAP: + return this->emitZeroIntAP(Ctx.getBitWidth(QT), E); + case PT_IntAPS: + return this->emitZeroIntAPS(Ctx.getBitWidth(QT), E); + case PT_Ptr: + return this->emitNullPtr(nullptr, E); + case PT_FnPtr: + return this->emitNullFnPtr(nullptr, E); + case PT_MemberPtr: + return this->emitNullMemberPtr(nullptr, E); + case PT_Float: { + return this->emitConstFloat(APFloat::getZero(Ctx.getFloatSemantics(QT)), E); + } + } + llvm_unreachable("unknown primitive type"); +} + +template <class Emitter> +bool Compiler<Emitter>::visitZeroRecordInitializer(const Record *R, + const Expr *E) { + assert(E); + assert(R); + // Fields + for (const Record::Field &Field : R->fields()) { + const Descriptor *D = Field.Desc; + if (D->isPrimitive()) { + QualType QT = D->getType(); + PrimType T = classifyPrim(D->getType()); + if (!this->visitZeroInitializer(T, QT, E)) + return false; + if (!this->emitInitField(T, Field.Offset, E)) + return false; + continue; + } + + if (!this->emitGetPtrField(Field.Offset, E)) + return false; + + if (D->isPrimitiveArray()) { + QualType ET = D->getElemQualType(); + PrimType T = classifyPrim(ET); + for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) { + if (!this->visitZeroInitializer(T, ET, E)) + return false; + if (!this->emitInitElem(T, I, E)) + return false; + } + } else if (D->isCompositeArray()) { + const Record *ElemRecord = D->ElemDesc->ElemRecord; + assert(D->ElemDesc->ElemRecord); + for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) { + if (!this->emitConstUint32(I, E)) + return false; + if (!this->emitArrayElemPtr(PT_Uint32, E)) + return false; + if (!this->visitZeroRecordInitializer(ElemRecord, E)) + return false; + if (!this->emitPopPtr(E)) + return false; + } + } else if (D->isRecord()) { + if (!this->visitZeroRecordInitializer(D->ElemRecord, 
E)) + return false; + } else { + assert(false); + } + + if (!this->emitPopPtr(E)) + return false; + } + + for (const Record::Base &B : R->bases()) { + if (!this->emitGetPtrBase(B.Offset, E)) + return false; + if (!this->visitZeroRecordInitializer(B.R, E)) + return false; + if (!this->emitFinishInitPop(E)) + return false; + } + + // FIXME: Virtual bases. + + return true; +} + +template <class Emitter> +template <typename T> +bool Compiler<Emitter>::emitConst(T Value, PrimType Ty, const Expr *E) { + switch (Ty) { + case PT_Sint8: + return this->emitConstSint8(Value, E); + case PT_Uint8: + return this->emitConstUint8(Value, E); + case PT_Sint16: + return this->emitConstSint16(Value, E); + case PT_Uint16: + return this->emitConstUint16(Value, E); + case PT_Sint32: + return this->emitConstSint32(Value, E); + case PT_Uint32: + return this->emitConstUint32(Value, E); + case PT_Sint64: + return this->emitConstSint64(Value, E); + case PT_Uint64: + return this->emitConstUint64(Value, E); + case PT_Bool: + return this->emitConstBool(Value, E); + case PT_Ptr: + case PT_FnPtr: + case PT_MemberPtr: + case PT_Float: + case PT_IntAP: + case PT_IntAPS: + llvm_unreachable("Invalid integral type"); + break; + } + llvm_unreachable("unknown primitive type"); +} + +template <class Emitter> +template <typename T> +bool Compiler<Emitter>::emitConst(T Value, const Expr *E) { + return this->emitConst(Value, classifyPrim(E->getType()), E); +} + +template <class Emitter> +bool Compiler<Emitter>::emitConst(const APSInt &Value, PrimType Ty, + const Expr *E) { + if (Ty == PT_IntAPS) + return this->emitConstIntAPS(Value, E); + if (Ty == PT_IntAP) + return this->emitConstIntAP(Value, E); + + if (Value.isSigned()) + return this->emitConst(Value.getSExtValue(), Ty, E); + return this->emitConst(Value.getZExtValue(), Ty, E); +} + +template <class Emitter> +bool Compiler<Emitter>::emitConst(const APSInt &Value, const Expr *E) { + return this->emitConst(Value, classifyPrim(E->getType()), E); +} + +template <class Emitter> +unsigned Compiler<Emitter>::allocateLocalPrimitive(DeclTy &&Src, PrimType Ty, + bool IsConst, + bool IsExtended) { + // Make sure we don't accidentally register the same decl twice. + if (const auto *VD = + dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) { + assert(!P.getGlobal(VD)); + assert(!Locals.contains(VD)); + (void)VD; + } + + // FIXME: There are cases where Src.is<Expr*>() is wrong, e.g. + // (int){12} in C. Consider using Expr::isTemporaryObject() instead + // or isa<MaterializeTemporaryExpr>(). + Descriptor *D = P.createDescriptor(Src, Ty, Descriptor::InlineDescMD, IsConst, + Src.is<const Expr *>()); + Scope::Local Local = this->createLocal(D); + if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) + Locals.insert({VD, Local}); + VarScope->add(Local, IsExtended); + return Local.Offset; +} + +template <class Emitter> +std::optional<unsigned> +Compiler<Emitter>::allocateLocal(DeclTy &&Src, const ValueDecl *ExtendingDecl) { + // Make sure we don't accidentally register the same decl twice. 
+ if ([[maybe_unused]] const auto *VD = + dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) { + assert(!P.getGlobal(VD)); + assert(!Locals.contains(VD)); + } + + QualType Ty; + const ValueDecl *Key = nullptr; + const Expr *Init = nullptr; + bool IsTemporary = false; + if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) { + Key = VD; + Ty = VD->getType(); + + if (const auto *VarD = dyn_cast<VarDecl>(VD)) + Init = VarD->getInit(); + } + if (auto *E = Src.dyn_cast<const Expr *>()) { + IsTemporary = true; + Ty = E->getType(); + } + + Descriptor *D = P.createDescriptor( + Src, Ty.getTypePtr(), Descriptor::InlineDescMD, Ty.isConstQualified(), + IsTemporary, /*IsMutable=*/false, Init); + if (!D) + return std::nullopt; + + Scope::Local Local = this->createLocal(D); + if (Key) + Locals.insert({Key, Local}); + if (ExtendingDecl) + VarScope->addExtended(Local, ExtendingDecl); + else + VarScope->add(Local, false); + return Local.Offset; +} + +template <class Emitter> +const RecordType *Compiler<Emitter>::getRecordTy(QualType Ty) { + if (const PointerType *PT = dyn_cast<PointerType>(Ty)) + return PT->getPointeeType()->getAs<RecordType>(); + return Ty->getAs<RecordType>(); +} + +template <class Emitter> Record *Compiler<Emitter>::getRecord(QualType Ty) { + if (const auto *RecordTy = getRecordTy(Ty)) + return getRecord(RecordTy->getDecl()); + return nullptr; +} + +template <class Emitter> +Record *Compiler<Emitter>::getRecord(const RecordDecl *RD) { + return P.getOrCreateRecord(RD); +} + +template <class Emitter> +const Function *Compiler<Emitter>::getFunction(const FunctionDecl *FD) { + return Ctx.getOrCreateFunction(FD); +} + +template <class Emitter> bool Compiler<Emitter>::visitExpr(const Expr *E) { + LocalScope<Emitter> RootScope(this); + // Void expressions. + if (E->getType()->isVoidType()) { + if (!visit(E)) + return false; + return this->emitRetVoid(E) && RootScope.destroyLocals(); + } + + // Expressions with a primitive return type. + if (std::optional<PrimType> T = classify(E)) { + if (!visit(E)) + return false; + return this->emitRet(*T, E) && RootScope.destroyLocals(); + } + + // Expressions with a composite return type. + // For us, that means everything we don't + // have a PrimType for. + if (std::optional<unsigned> LocalOffset = this->allocateLocal(E)) { + if (!this->emitGetPtrLocal(*LocalOffset, E)) + return false; + + if (!visitInitializer(E)) + return false; + + if (!this->emitFinishInit(E)) + return false; + // We are destroying the locals AFTER the Ret op. + // The Ret op needs to copy the (alive) values, but the + // destructors may still turn the entire expression invalid. + return this->emitRetValue(E) && RootScope.destroyLocals(); + } + + RootScope.destroyLocals(); + return false; +} + +template <class Emitter> +VarCreationState Compiler<Emitter>::visitDecl(const VarDecl *VD) { + + auto R = this->visitVarDecl(VD, /*Toplevel=*/true); + + if (R.notCreated()) + return R; + + if (R) + return true; + + if (!R && Context::shouldBeGloballyIndexed(VD)) { + if (auto GlobalIndex = P.getGlobal(VD)) { + Block *GlobalBlock = P.getGlobal(*GlobalIndex); + GlobalInlineDescriptor &GD = + *reinterpret_cast<GlobalInlineDescriptor *>(GlobalBlock->rawData()); + + GD.InitState = GlobalInitState::InitializerFailed; + GlobalBlock->invokeDtor(); + } + } + + return R; +} + +/// Toplevel visitDeclAndReturn(). +/// We get here from evaluateAsInitializer(). +/// We need to evaluate the initializer and return its value. 
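/// Illustrative example (not part of this change): this is the entry point
/// used when Clang evaluates a variable initializer as a whole, e.g.
///   constexpr int I = 3 * 4;
/// where the variable is created, initialized, and its value returned.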
+template <class Emitter> +bool Compiler<Emitter>::visitDeclAndReturn(const VarDecl *VD, + bool ConstantContext) { + std::optional<PrimType> VarT = classify(VD->getType()); + + // We only create variables if we're evaluating in a constant context. + // Otherwise, just evaluate the initializer and return it. + if (!ConstantContext) { + DeclScope<Emitter> LS(this, VD); + if (!this->visit(VD->getAnyInitializer())) + return false; + return this->emitRet(VarT.value_or(PT_Ptr), VD) && LS.destroyLocals(); + } + + LocalScope<Emitter> VDScope(this, VD); + if (!this->visitVarDecl(VD, /*Toplevel=*/true)) + return false; + + if (Context::shouldBeGloballyIndexed(VD)) { + auto GlobalIndex = P.getGlobal(VD); + assert(GlobalIndex); // visitVarDecl() didn't return false. + if (VarT) { + if (!this->emitGetGlobalUnchecked(*VarT, *GlobalIndex, VD)) + return false; + } else { + if (!this->emitGetPtrGlobal(*GlobalIndex, VD)) + return false; + } + } else { + auto Local = Locals.find(VD); + assert(Local != Locals.end()); // Same here. + if (VarT) { + if (!this->emitGetLocal(*VarT, Local->second.Offset, VD)) + return false; + } else { + if (!this->emitGetPtrLocal(Local->second.Offset, VD)) + return false; + } + } + + // Return the value. + if (!this->emitRet(VarT.value_or(PT_Ptr), VD)) { + // If the Ret above failed and this is a global variable, mark it as + // uninitialized, even everything else succeeded. + if (Context::shouldBeGloballyIndexed(VD)) { + auto GlobalIndex = P.getGlobal(VD); + assert(GlobalIndex); + Block *GlobalBlock = P.getGlobal(*GlobalIndex); + GlobalInlineDescriptor &GD = + *reinterpret_cast<GlobalInlineDescriptor *>(GlobalBlock->rawData()); + + GD.InitState = GlobalInitState::InitializerFailed; + GlobalBlock->invokeDtor(); + } + return false; + } + + return VDScope.destroyLocals(); +} + +template <class Emitter> +VarCreationState Compiler<Emitter>::visitVarDecl(const VarDecl *VD, bool Toplevel) { + // We don't know what to do with these, so just return false. + if (VD->getType().isNull()) + return false; + + // This case is EvalEmitter-only. If we won't create any instructions for the + // initializer anyway, don't bother creating the variable in the first place. + if (!this->isActive()) + return VarCreationState::NotCreated(); + + const Expr *Init = VD->getInit(); + std::optional<PrimType> VarT = classify(VD->getType()); + + if (Context::shouldBeGloballyIndexed(VD)) { + auto checkDecl = [&]() -> bool { + bool NeedsOp = !Toplevel && VD->isLocalVarDecl() && VD->isStaticLocal(); + return !NeedsOp || this->emitCheckDecl(VD, VD); + }; + + auto initGlobal = [&](unsigned GlobalIndex) -> bool { + assert(Init); + DeclScope<Emitter> LocalScope(this, VD); + + if (VarT) { + if (!this->visit(Init)) + return checkDecl() && false; + + return checkDecl() && this->emitInitGlobal(*VarT, GlobalIndex, VD); + } + + if (!checkDecl()) + return false; + + if (!this->emitGetPtrGlobal(GlobalIndex, Init)) + return false; + + if (!visitInitializer(Init)) + return false; + + if (!this->emitFinishInit(Init)) + return false; + + return this->emitPopPtr(Init); + }; + + // We've already seen and initialized this global. + if (std::optional<unsigned> GlobalIndex = P.getGlobal(VD)) { + if (P.getPtrGlobal(*GlobalIndex).isInitialized()) + return checkDecl(); + + // The previous attempt at initialization might've been unsuccessful, + // so let's try this one. 
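// Sketch for illustration only (not part of this change): this retry covers a
// global whose earlier initialization attempt failed, e.g. (hypothetical):
//   const int Y = someNonConstexprCall();   // first evaluation of Y fails
// A later constant evaluation that refers to 'Y' can land here and try again.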
+ return Init && checkDecl() && initGlobal(*GlobalIndex); + } + + std::optional<unsigned> GlobalIndex = P.createGlobal(VD, Init); + + if (!GlobalIndex) + return false; + + return !Init || (checkDecl() && initGlobal(*GlobalIndex)); + } else { + InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD)); + + if (VarT) { + unsigned Offset = this->allocateLocalPrimitive( + VD, *VarT, VD->getType().isConstQualified()); + if (Init) { + // If this is a toplevel declaration, create a scope for the + // initializer. + if (Toplevel) { + LocalScope<Emitter> Scope(this); + if (!this->visit(Init)) + return false; + return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals(); + } else { + if (!this->visit(Init)) + return false; + return this->emitSetLocal(*VarT, Offset, VD); + } + } + } else { + if (std::optional<unsigned> Offset = this->allocateLocal(VD)) { + if (!Init) + return true; + + if (!this->emitGetPtrLocal(*Offset, Init)) + return false; + + if (!visitInitializer(Init)) + return false; + + if (!this->emitFinishInit(Init)) + return false; + + return this->emitPopPtr(Init); + } + return false; + } + return true; + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::visitAPValue(const APValue &Val, PrimType ValType, + const Expr *E) { + assert(!DiscardResult); + if (Val.isInt()) + return this->emitConst(Val.getInt(), ValType, E); + else if (Val.isFloat()) + return this->emitConstFloat(Val.getFloat(), E); + + if (Val.isLValue()) { + if (Val.isNullPointer()) + return this->emitNull(ValType, nullptr, E); + APValue::LValueBase Base = Val.getLValueBase(); + if (const Expr *BaseExpr = Base.dyn_cast<const Expr *>()) + return this->visit(BaseExpr); + else if (const auto *VD = Base.dyn_cast<const ValueDecl *>()) { + return this->visitDeclRef(VD, E); + } + } else if (Val.isMemberPointer()) { + if (const ValueDecl *MemberDecl = Val.getMemberPointerDecl()) + return this->emitGetMemberPtr(MemberDecl, E); + return this->emitNullMemberPtr(nullptr, E); + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val, + const Expr *E) { + + if (Val.isStruct()) { + const Record *R = this->getRecord(E->getType()); + assert(R); + for (unsigned I = 0, N = Val.getStructNumFields(); I != N; ++I) { + const APValue &F = Val.getStructField(I); + const Record::Field *RF = R->getField(I); + + if (F.isInt() || F.isFloat() || F.isLValue() || F.isMemberPointer()) { + PrimType T = classifyPrim(RF->Decl->getType()); + if (!this->visitAPValue(F, T, E)) + return false; + if (!this->emitInitField(T, RF->Offset, E)) + return false; + } else if (F.isArray()) { + assert(RF->Desc->isPrimitiveArray()); + const auto *ArrType = RF->Decl->getType()->getAsArrayTypeUnsafe(); + PrimType ElemT = classifyPrim(ArrType->getElementType()); + assert(ArrType); + + if (!this->emitGetPtrField(RF->Offset, E)) + return false; + + for (unsigned A = 0, AN = F.getArraySize(); A != AN; ++A) { + if (!this->visitAPValue(F.getArrayInitializedElt(A), ElemT, E)) + return false; + if (!this->emitInitElem(ElemT, A, E)) + return false; + } + + if (!this->emitPopPtr(E)) + return false; + } else if (F.isStruct() || F.isUnion()) { + if (!this->emitGetPtrField(RF->Offset, E)) + return false; + if (!this->visitAPValueInitializer(F, E)) + return false; + if (!this->emitPopPtr(E)) + return false; + } else { + assert(false && "I don't think this should be possible"); + } + } + return true; + } else if (Val.isUnion()) { + const FieldDecl *UnionField = Val.getUnionField(); + const Record *R = 
this->getRecord(UnionField->getParent()); + assert(R); + const APValue &F = Val.getUnionValue(); + const Record::Field *RF = R->getField(UnionField); + PrimType T = classifyPrim(RF->Decl->getType()); + if (!this->visitAPValue(F, T, E)) + return false; + return this->emitInitField(T, RF->Offset, E); + } + // TODO: Other types. + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E) { + const Function *Func = getFunction(E->getDirectCallee()); + if (!Func) + return false; + + // For these, we're expected to ultimately return an APValue pointing + // to the CallExpr. This is needed to get the correct codegen. + unsigned Builtin = E->getBuiltinCallee(); + if (Builtin == Builtin::BI__builtin___CFStringMakeConstantString || + Builtin == Builtin::BI__builtin___NSStringMakeConstantString || + Builtin == Builtin::BI__builtin_ptrauth_sign_constant || + Builtin == Builtin::BI__builtin_function_start) { + if (std::optional<unsigned> GlobalOffset = P.createGlobal(E)) { + if (!this->emitGetPtrGlobal(*GlobalOffset, E)) + return false; + + if (PrimType PT = classifyPrim(E); PT != PT_Ptr && isPtrType(PT)) + return this->emitDecayPtr(PT_Ptr, PT, E); + return true; + } + return false; + } + + QualType ReturnType = E->getType(); + std::optional<PrimType> ReturnT = classify(E); + + // Non-primitive return type. Prepare storage. + if (!Initializing && !ReturnT && !ReturnType->isVoidType()) { + std::optional<unsigned> LocalIndex = allocateLocal(E); + if (!LocalIndex) + return false; + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + + if (!Func->isUnevaluatedBuiltin()) { + // Put arguments on the stack. + for (const auto *Arg : E->arguments()) { + if (!this->visit(Arg)) + return false; + } + } + + if (!this->emitCallBI(Func, E, E)) + return false; + + if (DiscardResult && !ReturnType->isVoidType()) { + assert(ReturnT); + return this->emitPop(*ReturnT, E); + } + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) { + if (E->getBuiltinCallee()) + return VisitBuiltinCallExpr(E); + + QualType ReturnType = E->getCallReturnType(Ctx.getASTContext()); + std::optional<PrimType> T = classify(ReturnType); + bool HasRVO = !ReturnType->isVoidType() && !T; + const FunctionDecl *FuncDecl = E->getDirectCallee(); + + if (HasRVO) { + if (DiscardResult) { + // If we need to discard the return value but the function returns its + // value via an RVO pointer, we need to create one such pointer just + // for this call. + if (std::optional<unsigned> LocalIndex = allocateLocal(E)) { + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + } else { + // We need the result. Prepare a pointer to return or + // dup the current one. + if (!Initializing) { + if (std::optional<unsigned> LocalIndex = allocateLocal(E)) { + if (!this->emitGetPtrLocal(*LocalIndex, E)) + return false; + } + } + if (!this->emitDupPtr(E)) + return false; + } + } + + auto Args = llvm::ArrayRef(E->getArgs(), E->getNumArgs()); + // Calling a static operator will still + // pass the instance, but we don't need it. + // Discard it here. + if (isa<CXXOperatorCallExpr>(E)) { + if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(FuncDecl); + MD && MD->isStatic()) { + if (!this->discard(E->getArg(0))) + return false; + Args = Args.drop_front(); + } + } + + std::optional<unsigned> CalleeOffset; + // Add the (optional, implicit) This pointer. 
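// Illustrative example (not part of this change): the implicit instance
// argument shows up both for plain member calls and for calls through a
// pointer to member:
//   struct S { constexpr int f() const { return 1; } };
//   constexpr S s{};
//   constexpr int (S::*pf)() const = &S::f;
//   static_assert(s.f() == 1 && (s.*pf)() == 1);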
+ if (const auto *MC = dyn_cast<CXXMemberCallExpr>(E)) { + if (!FuncDecl && classifyPrim(E->getCallee()) == PT_MemberPtr) { + // If we end up creating a CallPtr op for this, we need the base of the + // member pointer as the instance pointer, and later extract the function + // decl as the function pointer. + const Expr *Callee = E->getCallee(); + CalleeOffset = + this->allocateLocalPrimitive(Callee, PT_MemberPtr, true, false); + if (!this->visit(Callee)) + return false; + if (!this->emitSetLocal(PT_MemberPtr, *CalleeOffset, E)) + return false; + if (!this->emitGetLocal(PT_MemberPtr, *CalleeOffset, E)) + return false; + if (!this->emitGetMemberPtrBase(E)) + return false; + } else if (!this->visit(MC->getImplicitObjectArgument())) { + return false; + } + } + + llvm::BitVector NonNullArgs = collectNonNullArgs(FuncDecl, Args); + // Put arguments on the stack. + unsigned ArgIndex = 0; + for (const auto *Arg : Args) { + if (!this->visit(Arg)) + return false; + + // If we know the callee already, check the known parametrs for nullability. + if (FuncDecl && NonNullArgs[ArgIndex]) { + PrimType ArgT = classify(Arg).value_or(PT_Ptr); + if (ArgT == PT_Ptr || ArgT == PT_FnPtr) { + if (!this->emitCheckNonNullArg(ArgT, Arg)) + return false; + } + } + ++ArgIndex; + } + + if (FuncDecl) { + const Function *Func = getFunction(FuncDecl); + if (!Func) + return false; + assert(HasRVO == Func->hasRVO()); + + bool HasQualifier = false; + if (const auto *ME = dyn_cast<MemberExpr>(E->getCallee())) + HasQualifier = ME->hasQualifier(); + + bool IsVirtual = false; + if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl)) + IsVirtual = MD->isVirtual(); + + // In any case call the function. The return value will end up on the stack + // and if the function has RVO, we already have the pointer on the stack to + // write the result into. + if (IsVirtual && !HasQualifier) { + uint32_t VarArgSize = 0; + unsigned NumParams = + Func->getNumWrittenParams() + isa<CXXOperatorCallExpr>(E); + for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I) + VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr))); + + if (!this->emitCallVirt(Func, VarArgSize, E)) + return false; + } else if (Func->isVariadic()) { + uint32_t VarArgSize = 0; + unsigned NumParams = + Func->getNumWrittenParams() + isa<CXXOperatorCallExpr>(E); + for (unsigned I = NumParams, N = E->getNumArgs(); I != N; ++I) + VarArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr))); + if (!this->emitCallVar(Func, VarArgSize, E)) + return false; + } else { + if (!this->emitCall(Func, 0, E)) + return false; + } + } else { + // Indirect call. Visit the callee, which will leave a FunctionPointer on + // the stack. Cleanup of the returned value if necessary will be done after + // the function call completed. + + // Sum the size of all args from the call expr. + uint32_t ArgSize = 0; + for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) + ArgSize += align(primSize(classify(E->getArg(I)).value_or(PT_Ptr))); + + // Get the callee, either from a member pointer saved in CalleeOffset, + // or by just visiting the Callee expr. + if (CalleeOffset) { + if (!this->emitGetLocal(PT_MemberPtr, *CalleeOffset, E)) + return false; + if (!this->emitGetMemberPtrDecl(E)) + return false; + if (!this->emitCallPtr(ArgSize, E, E)) + return false; + } else { + if (!this->visit(E->getCallee())) + return false; + + if (!this->emitCallPtr(ArgSize, E, E)) + return false; + } + } + + // Cleanup for discarded return values. 
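// Illustrative example (not part of this change): a call whose primitive
// result is unused still leaves a value on the stack that must be popped:
//   constexpr int g() { return 5; }
//   constexpr int f() { g(); return 0; }   // g()'s result is discarded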
+ if (DiscardResult && !ReturnType->isVoidType() && T) + return this->emitPop(*T, E); + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E) { + SourceLocScope<Emitter> SLS(this, E); + + return this->delegate(E->getExpr()); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) { + SourceLocScope<Emitter> SLS(this, E); + + const Expr *SubExpr = E->getExpr(); + if (std::optional<PrimType> T = classify(E->getExpr())) + return this->visit(SubExpr); + + assert(Initializing); + return this->visitInitializer(SubExpr); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + if (DiscardResult) + return true; + + return this->emitConstBool(E->getValue(), E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXNullPtrLiteralExpr( + const CXXNullPtrLiteralExpr *E) { + if (DiscardResult) + return true; + + return this->emitNullPtr(nullptr, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitGNUNullExpr(const GNUNullExpr *E) { + if (DiscardResult) + return true; + + assert(E->getType()->isIntegerType()); + + PrimType T = classifyPrim(E->getType()); + return this->emitZero(T, E); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) { + if (DiscardResult) + return true; + + if (this->LambdaThisCapture.Offset > 0) { + if (this->LambdaThisCapture.IsPtr) + return this->emitGetThisFieldPtr(this->LambdaThisCapture.Offset, E); + return this->emitGetPtrThisField(this->LambdaThisCapture.Offset, E); + } + + // In some circumstances, the 'this' pointer does not actually refer to the + // instance pointer of the current function frame, but e.g. to the declaration + // currently being initialized. Here we emit the necessary instruction(s) for + // this scenario. 
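// Illustrative example (not part of this change): in a default member
// initializer, the implicit 'this' designates the object being constructed:
//   struct S { int A = 10; int B = A + 1; };   // 'A' here is '(*this).A'
//   constexpr S s{};
//   static_assert(s.B == 11);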
+ if (!InitStackActive || !E->isImplicit()) + return this->emitThis(E); + + if (InitStackActive && !InitStack.empty()) { + unsigned StartIndex = 0; + for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) { + if (InitStack[StartIndex].Kind != InitLink::K_Field) + break; + } + + for (unsigned I = StartIndex, N = InitStack.size(); I != N; ++I) { + if (!InitStack[I].template emit<Emitter>(this, E)) + return false; + } + return true; + } + return this->emitThis(E); +} + +template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) { + switch (S->getStmtClass()) { + case Stmt::CompoundStmtClass: + return visitCompoundStmt(cast<CompoundStmt>(S)); + case Stmt::DeclStmtClass: + return visitDeclStmt(cast<DeclStmt>(S)); + case Stmt::ReturnStmtClass: + return visitReturnStmt(cast<ReturnStmt>(S)); + case Stmt::IfStmtClass: + return visitIfStmt(cast<IfStmt>(S)); + case Stmt::WhileStmtClass: + return visitWhileStmt(cast<WhileStmt>(S)); + case Stmt::DoStmtClass: + return visitDoStmt(cast<DoStmt>(S)); + case Stmt::ForStmtClass: + return visitForStmt(cast<ForStmt>(S)); + case Stmt::CXXForRangeStmtClass: + return visitCXXForRangeStmt(cast<CXXForRangeStmt>(S)); + case Stmt::BreakStmtClass: + return visitBreakStmt(cast<BreakStmt>(S)); + case Stmt::ContinueStmtClass: + return visitContinueStmt(cast<ContinueStmt>(S)); + case Stmt::SwitchStmtClass: + return visitSwitchStmt(cast<SwitchStmt>(S)); + case Stmt::CaseStmtClass: + return visitCaseStmt(cast<CaseStmt>(S)); + case Stmt::DefaultStmtClass: + return visitDefaultStmt(cast<DefaultStmt>(S)); + case Stmt::AttributedStmtClass: + return visitAttributedStmt(cast<AttributedStmt>(S)); + case Stmt::CXXTryStmtClass: + return visitCXXTryStmt(cast<CXXTryStmt>(S)); + case Stmt::NullStmtClass: + return true; + // Always invalid statements. + case Stmt::GCCAsmStmtClass: + case Stmt::MSAsmStmtClass: + case Stmt::GotoStmtClass: + return this->emitInvalid(S); + case Stmt::LabelStmtClass: + return this->visitStmt(cast<LabelStmt>(S)->getSubStmt()); + default: { + if (const auto *E = dyn_cast<Expr>(S)) + return this->discard(E); + return false; + } + } +} + +/// Visits the given statment without creating a variable +/// scope for it in case it is a compound statement. +template <class Emitter> bool Compiler<Emitter>::visitLoopBody(const Stmt *S) { + if (isa<NullStmt>(S)) + return true; + + if (const auto *CS = dyn_cast<CompoundStmt>(S)) { + for (const auto *InnerStmt : CS->body()) + if (!visitStmt(InnerStmt)) + return false; + return true; + } + + return this->visitStmt(S); +} + +template <class Emitter> +bool Compiler<Emitter>::visitCompoundStmt(const CompoundStmt *S) { + BlockScope<Emitter> Scope(this); + for (const auto *InnerStmt : S->body()) + if (!visitStmt(InnerStmt)) + return false; + return Scope.destroyLocals(); +} + +template <class Emitter> +bool Compiler<Emitter>::visitDeclStmt(const DeclStmt *DS) { + for (const auto *D : DS->decls()) { + if (isa<StaticAssertDecl, TagDecl, TypedefNameDecl, UsingEnumDecl, + FunctionDecl>(D)) + continue; + + const auto *VD = dyn_cast<VarDecl>(D); + if (!VD) + return false; + if (!this->visitVarDecl(VD)) + return false; + } + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::visitReturnStmt(const ReturnStmt *RS) { + if (this->InStmtExpr) + return this->emitUnsupported(RS); + + if (const Expr *RE = RS->getRetValue()) { + LocalScope<Emitter> RetScope(this); + if (ReturnType) { + // Primitive types are simply returned. 
+ if (!this->visit(RE)) + return false; + this->emitCleanup(); + return this->emitRet(*ReturnType, RS); + } else if (RE->getType()->isVoidType()) { + if (!this->visit(RE)) + return false; + } else { + // RVO - construct the value in the return location. + if (!this->emitRVOPtr(RE)) + return false; + if (!this->visitInitializer(RE)) + return false; + if (!this->emitPopPtr(RE)) + return false; + + this->emitCleanup(); + return this->emitRetVoid(RS); + } + } + + // Void return. + this->emitCleanup(); + return this->emitRetVoid(RS); +} + +template <class Emitter> bool Compiler<Emitter>::visitIfStmt(const IfStmt *IS) { + BlockScope<Emitter> IfScope(this); + + if (IS->isNonNegatedConsteval()) + return visitStmt(IS->getThen()); + if (IS->isNegatedConsteval()) + return IS->getElse() ? visitStmt(IS->getElse()) : true; + + if (auto *CondInit = IS->getInit()) + if (!visitStmt(CondInit)) + return false; + + if (const DeclStmt *CondDecl = IS->getConditionVariableDeclStmt()) + if (!visitDeclStmt(CondDecl)) + return false; + + if (!this->visitBool(IS->getCond())) + return false; + + if (const Stmt *Else = IS->getElse()) { + LabelTy LabelElse = this->getLabel(); + LabelTy LabelEnd = this->getLabel(); + if (!this->jumpFalse(LabelElse)) + return false; + if (!visitStmt(IS->getThen())) + return false; + if (!this->jump(LabelEnd)) + return false; + this->emitLabel(LabelElse); + if (!visitStmt(Else)) + return false; + this->emitLabel(LabelEnd); + } else { + LabelTy LabelEnd = this->getLabel(); + if (!this->jumpFalse(LabelEnd)) + return false; + if (!visitStmt(IS->getThen())) + return false; + this->emitLabel(LabelEnd); + } + + return IfScope.destroyLocals(); +} + +template <class Emitter> +bool Compiler<Emitter>::visitWhileStmt(const WhileStmt *S) { + const Expr *Cond = S->getCond(); + const Stmt *Body = S->getBody(); + + LabelTy CondLabel = this->getLabel(); // Label before the condition. + LabelTy EndLabel = this->getLabel(); // Label after the loop. 
+ LoopScope<Emitter> LS(this, EndLabel, CondLabel); + + this->fallthrough(CondLabel); + this->emitLabel(CondLabel); + + if (const DeclStmt *CondDecl = S->getConditionVariableDeclStmt()) + if (!visitDeclStmt(CondDecl)) + return false; + + if (!this->visitBool(Cond)) + return false; + if (!this->jumpFalse(EndLabel)) + return false; + + LocalScope<Emitter> Scope(this); + { + DestructorScope<Emitter> DS(Scope); + if (!this->visitLoopBody(Body)) + return false; + } + + if (!this->jump(CondLabel)) + return false; + this->emitLabel(EndLabel); + + return true; +} + +template <class Emitter> bool Compiler<Emitter>::visitDoStmt(const DoStmt *S) { + const Expr *Cond = S->getCond(); + const Stmt *Body = S->getBody(); + + LabelTy StartLabel = this->getLabel(); + LabelTy EndLabel = this->getLabel(); + LabelTy CondLabel = this->getLabel(); + LoopScope<Emitter> LS(this, EndLabel, CondLabel); + LocalScope<Emitter> Scope(this); + + this->fallthrough(StartLabel); + this->emitLabel(StartLabel); + { + DestructorScope<Emitter> DS(Scope); + + if (!this->visitLoopBody(Body)) + return false; + this->fallthrough(CondLabel); + this->emitLabel(CondLabel); + if (!this->visitBool(Cond)) + return false; + } + if (!this->jumpTrue(StartLabel)) + return false; + + this->fallthrough(EndLabel); + this->emitLabel(EndLabel); + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::visitForStmt(const ForStmt *S) { + // for (Init; Cond; Inc) { Body } + const Stmt *Init = S->getInit(); + const Expr *Cond = S->getCond(); + const Expr *Inc = S->getInc(); + const Stmt *Body = S->getBody(); + + LabelTy EndLabel = this->getLabel(); + LabelTy CondLabel = this->getLabel(); + LabelTy IncLabel = this->getLabel(); + LoopScope<Emitter> LS(this, EndLabel, IncLabel); + LocalScope<Emitter> Scope(this); + + if (Init && !this->visitStmt(Init)) + return false; + this->fallthrough(CondLabel); + this->emitLabel(CondLabel); + + if (const DeclStmt *CondDecl = S->getConditionVariableDeclStmt()) + if (!visitDeclStmt(CondDecl)) + return false; + + if (Cond) { + if (!this->visitBool(Cond)) + return false; + if (!this->jumpFalse(EndLabel)) + return false; + } + + { + DestructorScope<Emitter> DS(Scope); + + if (Body && !this->visitLoopBody(Body)) + return false; + this->fallthrough(IncLabel); + this->emitLabel(IncLabel); + if (Inc && !this->discard(Inc)) + return false; + } + + if (!this->jump(CondLabel)) + return false; + this->fallthrough(EndLabel); + this->emitLabel(EndLabel); + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::visitCXXForRangeStmt(const CXXForRangeStmt *S) { + const Stmt *Init = S->getInit(); + const Expr *Cond = S->getCond(); + const Expr *Inc = S->getInc(); + const Stmt *Body = S->getBody(); + const Stmt *BeginStmt = S->getBeginStmt(); + const Stmt *RangeStmt = S->getRangeStmt(); + const Stmt *EndStmt = S->getEndStmt(); + const VarDecl *LoopVar = S->getLoopVariable(); + + LabelTy EndLabel = this->getLabel(); + LabelTy CondLabel = this->getLabel(); + LabelTy IncLabel = this->getLabel(); + LoopScope<Emitter> LS(this, EndLabel, IncLabel); + + // Emit declarations needed in the loop. + if (Init && !this->visitStmt(Init)) + return false; + if (!this->visitStmt(RangeStmt)) + return false; + if (!this->visitStmt(BeginStmt)) + return false; + if (!this->visitStmt(EndStmt)) + return false; + + // Now the condition as well as the loop variable assignment. 
+ this->fallthrough(CondLabel); + this->emitLabel(CondLabel); + if (!this->visitBool(Cond)) + return false; + if (!this->jumpFalse(EndLabel)) + return false; + + if (!this->visitVarDecl(LoopVar)) + return false; + + // Body. + LocalScope<Emitter> Scope(this); + { + DestructorScope<Emitter> DS(Scope); + + if (!this->visitLoopBody(Body)) + return false; + this->fallthrough(IncLabel); + this->emitLabel(IncLabel); + if (!this->discard(Inc)) + return false; + } + + if (!this->jump(CondLabel)) + return false; + + this->fallthrough(EndLabel); + this->emitLabel(EndLabel); + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::visitBreakStmt(const BreakStmt *S) { + if (!BreakLabel) + return false; + + this->VarScope->emitDestructors(); + return this->jump(*BreakLabel); +} + +template <class Emitter> +bool Compiler<Emitter>::visitContinueStmt(const ContinueStmt *S) { + if (!ContinueLabel) + return false; + + this->VarScope->emitDestructors(); + return this->jump(*ContinueLabel); +} + +template <class Emitter> +bool Compiler<Emitter>::visitSwitchStmt(const SwitchStmt *S) { + const Expr *Cond = S->getCond(); + PrimType CondT = this->classifyPrim(Cond->getType()); + + LabelTy EndLabel = this->getLabel(); + OptLabelTy DefaultLabel = std::nullopt; + unsigned CondVar = this->allocateLocalPrimitive(Cond, CondT, true, false); + + if (const auto *CondInit = S->getInit()) + if (!visitStmt(CondInit)) + return false; + + if (const DeclStmt *CondDecl = S->getConditionVariableDeclStmt()) + if (!visitDeclStmt(CondDecl)) + return false; + + // Initialize condition variable. + if (!this->visit(Cond)) + return false; + if (!this->emitSetLocal(CondT, CondVar, S)) + return false; + + CaseMap CaseLabels; + // Create labels and comparison ops for all case statements. + for (const SwitchCase *SC = S->getSwitchCaseList(); SC; + SC = SC->getNextSwitchCase()) { + if (const auto *CS = dyn_cast<CaseStmt>(SC)) { + // FIXME: Implement ranges. + if (CS->caseStmtIsGNURange()) + return false; + CaseLabels[SC] = this->getLabel(); + + const Expr *Value = CS->getLHS(); + PrimType ValueT = this->classifyPrim(Value->getType()); + + // Compare the case statement's value to the switch condition. + if (!this->emitGetLocal(CondT, CondVar, CS)) + return false; + if (!this->visit(Value)) + return false; + + // Compare and jump to the case label. + if (!this->emitEQ(ValueT, S)) + return false; + if (!this->jumpTrue(CaseLabels[CS])) + return false; + } else { + assert(!DefaultLabel); + DefaultLabel = this->getLabel(); + } + } + + // If none of the conditions above were true, fall through to the default + // statement or jump after the switch statement. 
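As a rough source-level sketch of the lowering built up here (hypothetical function, not part of this change; the real output is interpreter bytecode, not C++), a small switch becomes a stashed condition, one comparison and conditional jump per case, and a final jump to the default label or past the statement:

  int classify_example(int x) {
    // switch (x) { case 1: return 10; case 2: return 20; default: return -1; }
    int Cond = x;               // condition evaluated once into a local (CondVar above)
    if (Cond == 1) goto case1;  // emitEQ + jumpTrue for each case label
    if (Cond == 2) goto case2;
    goto defaultLabel;          // no case matched
  case1:
    return 10;
  case2:
    return 20;
  defaultLabel:
    return -1;
  }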
+ if (DefaultLabel) { + if (!this->jump(*DefaultLabel)) + return false; + } else { + if (!this->jump(EndLabel)) + return false; + } + + SwitchScope<Emitter> SS(this, std::move(CaseLabels), EndLabel, DefaultLabel); + if (!this->visitStmt(S->getBody())) + return false; + this->emitLabel(EndLabel); + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::visitCaseStmt(const CaseStmt *S) { + this->emitLabel(CaseLabels[S]); + return this->visitStmt(S->getSubStmt()); +} + +template <class Emitter> +bool Compiler<Emitter>::visitDefaultStmt(const DefaultStmt *S) { + this->emitLabel(*DefaultLabel); + return this->visitStmt(S->getSubStmt()); +} + +template <class Emitter> +bool Compiler<Emitter>::visitAttributedStmt(const AttributedStmt *S) { + if (this->Ctx.getLangOpts().CXXAssumptions && + !this->Ctx.getLangOpts().MSVCCompat) { + for (const Attr *A : S->getAttrs()) { + auto *AA = dyn_cast<CXXAssumeAttr>(A); + if (!AA) + continue; + + assert(isa<NullStmt>(S->getSubStmt())); + + const Expr *Assumption = AA->getAssumption(); + if (Assumption->isValueDependent()) + return false; + + if (Assumption->HasSideEffects(this->Ctx.getASTContext())) + continue; + + // Evaluate assumption. + if (!this->visitBool(Assumption)) + return false; + + if (!this->emitAssume(Assumption)) + return false; + } + } + + // Ignore other attributes. + return this->visitStmt(S->getSubStmt()); +} + +template <class Emitter> +bool Compiler<Emitter>::visitCXXTryStmt(const CXXTryStmt *S) { + // Ignore all handlers. + return this->visitStmt(S->getTryBlock()); +} + +template <class Emitter> +bool Compiler<Emitter>::emitLambdaStaticInvokerBody(const CXXMethodDecl *MD) { + assert(MD->isLambdaStaticInvoker()); + assert(MD->hasBody()); + assert(cast<CompoundStmt>(MD->getBody())->body_empty()); + + const CXXRecordDecl *ClosureClass = MD->getParent(); + const CXXMethodDecl *LambdaCallOp = ClosureClass->getLambdaCallOperator(); + assert(ClosureClass->captures_begin() == ClosureClass->captures_end()); + const Function *Func = this->getFunction(LambdaCallOp); + if (!Func) + return false; + assert(Func->hasThisPointer()); + assert(Func->getNumParams() == (MD->getNumParams() + 1 + Func->hasRVO())); + + if (Func->hasRVO()) { + if (!this->emitRVOPtr(MD)) + return false; + } + + // The lambda call operator needs an instance pointer, but we don't have + // one here, and we don't need one either because the lambda cannot have + // any captures, as verified above. Emit a null pointer. This is then + // special-cased when interpreting to not emit any misleading diagnostics. + if (!this->emitNullPtr(nullptr, MD)) + return false; + + // Forward all arguments from the static invoker to the lambda call operator. + for (const ParmVarDecl *PVD : MD->parameters()) { + auto It = this->Params.find(PVD); + assert(It != this->Params.end()); + + // We do the lvalue-to-rvalue conversion manually here, so no need + // to care about references. + PrimType ParamType = this->classify(PVD->getType()).value_or(PT_Ptr); + if (!this->emitGetParam(ParamType, It->second.Offset, MD)) + return false; + } + + if (!this->emitCall(Func, 0, LambdaCallOp)) + return false; + + this->emitCleanup(); + if (ReturnType) + return this->emitRet(*ReturnType, MD); + + // Nothing to do, since we emitted the RVO pointer above. + return this->emitRetVoid(MD); +} + +template <class Emitter> +bool Compiler<Emitter>::visitFunc(const FunctionDecl *F) { + // Classify the return type. 
+  ReturnType = this->classify(F->getReturnType());
+
+  auto emitFieldInitializer = [&](const Record::Field *F, unsigned FieldOffset,
+                                  const Expr *InitExpr) -> bool {
+    // We don't know what to do with these, so just return false.
+    if (InitExpr->getType().isNull())
+      return false;
+
+    if (std::optional<PrimType> T = this->classify(InitExpr)) {
+      if (!this->visit(InitExpr))
+        return false;
+
+      if (F->isBitField())
+        return this->emitInitThisBitField(*T, F, FieldOffset, InitExpr);
+      return this->emitInitThisField(*T, FieldOffset, InitExpr);
+    }
+    // Non-primitive case. Get a pointer to the field-to-initialize
+    // on the stack and call visitInitializer() for it.
+    InitLinkScope<Emitter> FieldScope(this, InitLink::Field(F->Offset));
+    if (!this->emitGetPtrThisField(FieldOffset, InitExpr))
+      return false;
+
+    if (!this->visitInitializer(InitExpr))
+      return false;
+
+    return this->emitPopPtr(InitExpr);
+  };
+
+  // Emit custom code if this is a lambda static invoker.
+  if (const auto *MD = dyn_cast<CXXMethodDecl>(F);
+      MD && MD->isLambdaStaticInvoker())
+    return this->emitLambdaStaticInvokerBody(MD);
+
+  // Constructor. Set up field initializers.
+  if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(F)) {
+    const RecordDecl *RD = Ctor->getParent();
+    const Record *R = this->getRecord(RD);
+    if (!R)
+      return false;
+
+    InitLinkScope<Emitter> InitScope(this, InitLink::This());
+    for (const auto *Init : Ctor->inits()) {
+      // Scope needed for the initializers.
+      BlockScope<Emitter> Scope(this);
+
+      const Expr *InitExpr = Init->getInit();
+      if (const FieldDecl *Member = Init->getMember()) {
+        const Record::Field *F = R->getField(Member);
+
+        if (!emitFieldInitializer(F, F->Offset, InitExpr))
+          return false;
+      } else if (const Type *Base = Init->getBaseClass()) {
+        const auto *BaseDecl = Base->getAsCXXRecordDecl();
+        assert(BaseDecl);
+
+        if (Init->isBaseVirtual()) {
+          assert(R->getVirtualBase(BaseDecl));
+          if (!this->emitGetPtrThisVirtBase(BaseDecl, InitExpr))
+            return false;
+
+        } else {
+          // Base class initializer.
+          // Get This Base and call initializer on it.
+          const Record::Base *B = R->getBase(BaseDecl);
+          assert(B);
+          if (!this->emitGetPtrThisBase(B->Offset, InitExpr))
+            return false;
+        }
+
+        if (!this->visitInitializer(InitExpr))
+          return false;
+        if (!this->emitFinishInitPop(InitExpr))
+          return false;
+      } else if (const IndirectFieldDecl *IFD = Init->getIndirectMember()) {
+        assert(IFD->getChainingSize() >= 2);
+
+        unsigned NestedFieldOffset = 0;
+        const Record::Field *NestedField = nullptr;
+        for (const NamedDecl *ND : IFD->chain()) {
+          const auto *FD = cast<FieldDecl>(ND);
+          const Record *FieldRecord =
+              this->P.getOrCreateRecord(FD->getParent());
+          assert(FieldRecord);
+
+          NestedField = FieldRecord->getField(FD);
+          assert(NestedField);
+
+          NestedFieldOffset += NestedField->Offset;
+        }
+        assert(NestedField);
+
+        if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr))
+          return false;
+      } else {
+        assert(Init->isDelegatingInitializer());
+        if (!this->emitThis(InitExpr))
+          return false;
+        if (!this->visitInitializer(Init->getInit()))
+          return false;
+        if (!this->emitPopPtr(InitExpr))
+          return false;
+      }
+
+      if (!Scope.destroyLocals())
+        return false;
+    }
+  }
+
+  if (const auto *Body = F->getBody())
+    if (!visitStmt(Body))
+      return false;
+
+  // Emit a guard return to protect against a code path missing one.
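To make the constructor cases handled above concrete, a small constexpr constructor exercising member, base, default-member and delegating initializers might look like this (hypothetical types, not taken from this change):

  struct Base { int b; constexpr Base(int v) : b(v) {} };
  struct S : Base {
    int x;
    int y = 2;                        // default member initializer
    constexpr S() : Base(1), x(0) {}  // base initializer, then member initializer
    constexpr S(int) : S() {}         // delegating initializer
  };
  constexpr S s{42};
  static_assert(s.b == 1 && s.x == 0 && s.y == 2, "");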
+ if (F->getReturnType()->isVoidType()) + return this->emitRetVoid(SourceInfo{}); + return this->emitNoRet(SourceInfo{}); +} + +template <class Emitter> +bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { + const Expr *SubExpr = E->getSubExpr(); + if (SubExpr->getType()->isAnyComplexType()) + return this->VisitComplexUnaryOperator(E); + std::optional<PrimType> T = classify(SubExpr->getType()); + + switch (E->getOpcode()) { + case UO_PostInc: { // x++ + if (!Ctx.getLangOpts().CPlusPlus14) + return this->emitInvalid(E); + if (!T) + return this->emitError(E); + + if (!this->visit(SubExpr)) + return false; + + if (T == PT_Ptr || T == PT_FnPtr) { + if (!this->emitIncPtr(E)) + return false; + + return DiscardResult ? this->emitPopPtr(E) : true; + } + + if (T == PT_Float) { + return DiscardResult ? this->emitIncfPop(getRoundingMode(E), E) + : this->emitIncf(getRoundingMode(E), E); + } + + return DiscardResult ? this->emitIncPop(*T, E) : this->emitInc(*T, E); + } + case UO_PostDec: { // x-- + if (!Ctx.getLangOpts().CPlusPlus14) + return this->emitInvalid(E); + if (!T) + return this->emitError(E); + + if (!this->visit(SubExpr)) + return false; + + if (T == PT_Ptr || T == PT_FnPtr) { + if (!this->emitDecPtr(E)) + return false; + + return DiscardResult ? this->emitPopPtr(E) : true; + } + + if (T == PT_Float) { + return DiscardResult ? this->emitDecfPop(getRoundingMode(E), E) + : this->emitDecf(getRoundingMode(E), E); + } + + return DiscardResult ? this->emitDecPop(*T, E) : this->emitDec(*T, E); + } + case UO_PreInc: { // ++x + if (!Ctx.getLangOpts().CPlusPlus14) + return this->emitInvalid(E); + if (!T) + return this->emitError(E); + + if (!this->visit(SubExpr)) + return false; + + if (T == PT_Ptr || T == PT_FnPtr) { + if (!this->emitLoadPtr(E)) + return false; + if (!this->emitConstUint8(1, E)) + return false; + if (!this->emitAddOffsetUint8(E)) + return false; + return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E); + } + + // Post-inc and pre-inc are the same if the value is to be discarded. + if (DiscardResult) { + if (T == PT_Float) + return this->emitIncfPop(getRoundingMode(E), E); + return this->emitIncPop(*T, E); + } + + if (T == PT_Float) { + const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType()); + if (!this->emitLoadFloat(E)) + return false; + if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E)) + return false; + if (!this->emitAddf(getRoundingMode(E), E)) + return false; + if (!this->emitStoreFloat(E)) + return false; + } else { + assert(isIntegralType(*T)); + if (!this->emitLoad(*T, E)) + return false; + if (!this->emitConst(1, E)) + return false; + if (!this->emitAdd(*T, E)) + return false; + if (!this->emitStore(*T, E)) + return false; + } + return E->isGLValue() || this->emitLoadPop(*T, E); + } + case UO_PreDec: { // --x + if (!Ctx.getLangOpts().CPlusPlus14) + return this->emitInvalid(E); + if (!T) + return this->emitError(E); + + if (!this->visit(SubExpr)) + return false; + + if (T == PT_Ptr || T == PT_FnPtr) { + if (!this->emitLoadPtr(E)) + return false; + if (!this->emitConstUint8(1, E)) + return false; + if (!this->emitSubOffsetUint8(E)) + return false; + return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E); + } + + // Post-dec and pre-dec are the same if the value is to be discarded. 
+ if (DiscardResult) { + if (T == PT_Float) + return this->emitDecfPop(getRoundingMode(E), E); + return this->emitDecPop(*T, E); + } + + if (T == PT_Float) { + const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType()); + if (!this->emitLoadFloat(E)) + return false; + if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E)) + return false; + if (!this->emitSubf(getRoundingMode(E), E)) + return false; + if (!this->emitStoreFloat(E)) + return false; + } else { + assert(isIntegralType(*T)); + if (!this->emitLoad(*T, E)) + return false; + if (!this->emitConst(1, E)) + return false; + if (!this->emitSub(*T, E)) + return false; + if (!this->emitStore(*T, E)) + return false; + } + return E->isGLValue() || this->emitLoadPop(*T, E); + } + case UO_LNot: // !x + if (!T) + return this->emitError(E); + + if (DiscardResult) + return this->discard(SubExpr); + + if (!this->visitBool(SubExpr)) + return false; + + if (!this->emitInvBool(E)) + return false; + + if (PrimType ET = classifyPrim(E->getType()); ET != PT_Bool) + return this->emitCast(PT_Bool, ET, E); + return true; + case UO_Minus: // -x + if (!T) + return this->emitError(E); + + if (!this->visit(SubExpr)) + return false; + return DiscardResult ? this->emitPop(*T, E) : this->emitNeg(*T, E); + case UO_Plus: // +x + if (!T) + return this->emitError(E); + + if (!this->visit(SubExpr)) // noop + return false; + return DiscardResult ? this->emitPop(*T, E) : true; + case UO_AddrOf: // &x + if (E->getType()->isMemberPointerType()) { + // C++11 [expr.unary.op]p3 has very strict rules on how the address of a + // member can be formed. + return this->emitGetMemberPtr(cast<DeclRefExpr>(SubExpr)->getDecl(), E); + } + // We should already have a pointer when we get here. + return this->delegate(SubExpr); + case UO_Deref: // *x + if (DiscardResult) + return this->discard(SubExpr); + return this->visit(SubExpr); + case UO_Not: // ~x + if (!T) + return this->emitError(E); + + if (!this->visit(SubExpr)) + return false; + return DiscardResult ? this->emitPop(*T, E) : this->emitComp(*T, E); + case UO_Real: // __real x + assert(T); + return this->delegate(SubExpr); + case UO_Imag: { // __imag x + assert(T); + if (!this->discard(SubExpr)) + return false; + return this->visitZeroInitializer(*T, SubExpr->getType(), SubExpr); + } + case UO_Extension: + return this->delegate(SubExpr); + case UO_Coawait: + assert(false && "Unhandled opcode"); + } + + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitComplexUnaryOperator(const UnaryOperator *E) { + const Expr *SubExpr = E->getSubExpr(); + assert(SubExpr->getType()->isAnyComplexType()); + + if (DiscardResult) + return this->discard(SubExpr); + + std::optional<PrimType> ResT = classify(E); + auto prepareResult = [=]() -> bool { + if (!ResT && !Initializing) { + std::optional<unsigned> LocalIndex = allocateLocal(SubExpr); + if (!LocalIndex) + return false; + return this->emitGetPtrLocal(*LocalIndex, E); + } + + return true; + }; + + // The offset of the temporary, if we created one. 
+ unsigned SubExprOffset = ~0u; + auto createTemp = [=, &SubExprOffset]() -> bool { + SubExprOffset = this->allocateLocalPrimitive(SubExpr, PT_Ptr, true, false); + if (!this->visit(SubExpr)) + return false; + return this->emitSetLocal(PT_Ptr, SubExprOffset, E); + }; + + PrimType ElemT = classifyComplexElementType(SubExpr->getType()); + auto getElem = [=](unsigned Offset, unsigned Index) -> bool { + if (!this->emitGetLocal(PT_Ptr, Offset, E)) + return false; + return this->emitArrayElemPop(ElemT, Index, E); + }; + + switch (E->getOpcode()) { + case UO_Minus: + if (!prepareResult()) + return false; + if (!createTemp()) + return false; + for (unsigned I = 0; I != 2; ++I) { + if (!getElem(SubExprOffset, I)) + return false; + if (!this->emitNeg(ElemT, E)) + return false; + if (!this->emitInitElem(ElemT, I, E)) + return false; + } + break; + + case UO_Plus: // +x + case UO_AddrOf: // &x + case UO_Deref: // *x + return this->delegate(SubExpr); + + case UO_LNot: + if (!this->visit(SubExpr)) + return false; + if (!this->emitComplexBoolCast(SubExpr)) + return false; + if (!this->emitInvBool(E)) + return false; + if (PrimType ET = classifyPrim(E->getType()); ET != PT_Bool) + return this->emitCast(PT_Bool, ET, E); + return true; + + case UO_Real: + return this->emitComplexReal(SubExpr); + + case UO_Imag: + if (!this->visit(SubExpr)) + return false; + + if (SubExpr->isLValue()) { + if (!this->emitConstUint8(1, E)) + return false; + return this->emitArrayElemPtrPopUint8(E); + } + + // Since our _Complex implementation does not map to a primitive type, + // we sometimes have to do the lvalue-to-rvalue conversion here manually. + return this->emitArrayElemPop(classifyPrim(E->getType()), 1, E); + + case UO_Not: // ~x + if (!this->visit(SubExpr)) + return false; + // Negate the imaginary component. + if (!this->emitArrayElem(ElemT, 1, E)) + return false; + if (!this->emitNeg(ElemT, E)) + return false; + if (!this->emitInitElem(ElemT, 1, E)) + return false; + return DiscardResult ? this->emitPopPtr(E) : true; + + case UO_Extension: + return this->delegate(SubExpr); + + default: + return this->emitInvalid(E); + } + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) { + if (DiscardResult) + return true; + + if (const auto *ECD = dyn_cast<EnumConstantDecl>(D)) { + return this->emitConst(ECD->getInitVal(), E); + } else if (const auto *BD = dyn_cast<BindingDecl>(D)) { + return this->visit(BD->getBinding()); + } else if (const auto *FuncDecl = dyn_cast<FunctionDecl>(D)) { + const Function *F = getFunction(FuncDecl); + return F && this->emitGetFnPtr(F, E); + } else if (const auto *TPOD = dyn_cast<TemplateParamObjectDecl>(D)) { + if (std::optional<unsigned> Index = P.getOrCreateGlobal(D)) { + if (!this->emitGetPtrGlobal(*Index, E)) + return false; + if (std::optional<PrimType> T = classify(E->getType())) { + if (!this->visitAPValue(TPOD->getValue(), *T, E)) + return false; + return this->emitInitGlobal(*T, *Index, E); + } + return this->visitAPValueInitializer(TPOD->getValue(), E); + } + return false; + } + + // References are implemented via pointers, so when we see a DeclRefExpr + // pointing to a reference, we need to get its value directly (i.e. the + // pointer to the actual value) instead of a pointer to the pointer to the + // value. + bool IsReference = D->getType()->isReferenceType(); + + // Check for local/global variables and parameters. 
+ if (auto It = Locals.find(D); It != Locals.end()) { + const unsigned Offset = It->second.Offset; + if (IsReference) + return this->emitGetLocal(PT_Ptr, Offset, E); + return this->emitGetPtrLocal(Offset, E); + } else if (auto GlobalIndex = P.getGlobal(D)) { + if (IsReference) { + if (!Ctx.getLangOpts().CPlusPlus11) + return this->emitGetGlobal(classifyPrim(E), *GlobalIndex, E); + return this->emitGetGlobalUnchecked(classifyPrim(E), *GlobalIndex, E); + } + + return this->emitGetPtrGlobal(*GlobalIndex, E); + } else if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) { + if (auto It = this->Params.find(PVD); It != this->Params.end()) { + if (IsReference || !It->second.IsPtr) + return this->emitGetParam(classifyPrim(E), It->second.Offset, E); + + return this->emitGetPtrParam(It->second.Offset, E); + } + } + + // In case we need to re-visit a declaration. + auto revisit = [&](const VarDecl *VD) -> bool { + auto VarState = this->visitDecl(VD); + + if (VarState.notCreated()) + return true; + if (!VarState) + return false; + // Retry. + return this->visitDeclRef(D, E); + }; + + // Handle lambda captures. + if (auto It = this->LambdaCaptures.find(D); + It != this->LambdaCaptures.end()) { + auto [Offset, IsPtr] = It->second; + + if (IsPtr) + return this->emitGetThisFieldPtr(Offset, E); + return this->emitGetPtrThisField(Offset, E); + } else if (const auto *DRE = dyn_cast<DeclRefExpr>(E); + DRE && DRE->refersToEnclosingVariableOrCapture()) { + if (const auto *VD = dyn_cast<VarDecl>(D); VD && VD->isInitCapture()) + return revisit(VD); + } + + if (D != InitializingDecl) { + // Try to lazily visit (or emit dummy pointers for) declarations + // we haven't seen yet. + if (Ctx.getLangOpts().CPlusPlus) { + if (const auto *VD = dyn_cast<VarDecl>(D)) { + const auto typeShouldBeVisited = [&](QualType T) -> bool { + if (T.isConstant(Ctx.getASTContext())) + return true; + if (const auto *RT = T->getAs<ReferenceType>()) + return RT->getPointeeType().isConstQualified(); + return false; + }; + + // Visit local const variables like normal. + if ((VD->hasGlobalStorage() || VD->isLocalVarDecl() || + VD->isStaticDataMember()) && + typeShouldBeVisited(VD->getType())) + return revisit(VD); + } + } else { + if (const auto *VD = dyn_cast<VarDecl>(D); + VD && VD->getAnyInitializer() && + VD->getType().isConstant(Ctx.getASTContext()) && !VD->isWeak()) + return revisit(VD); + } + } + + if (std::optional<unsigned> I = P.getOrCreateDummy(D)) { + if (!this->emitGetPtrGlobal(*I, E)) + return false; + if (E->getType()->isVoidType()) + return true; + // Convert the dummy pointer to another pointer type if we have to. 
+ if (PrimType PT = classifyPrim(E); PT != PT_Ptr) { + if (isPtrType(PT)) + return this->emitDecayPtr(PT_Ptr, PT, E); + return false; + } + return true; + } + + if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) + return this->emitInvalidDeclRef(DRE, E); + return false; +} + +template <class Emitter> +bool Compiler<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) { + const auto *D = E->getDecl(); + return this->visitDeclRef(D, E); +} + +template <class Emitter> void Compiler<Emitter>::emitCleanup() { + for (VariableScope<Emitter> *C = VarScope; C; C = C->getParent()) + C->emitDestruction(); +} + +template <class Emitter> +unsigned Compiler<Emitter>::collectBaseOffset(const QualType BaseType, + const QualType DerivedType) { + const auto extractRecordDecl = [](QualType Ty) -> const CXXRecordDecl * { + if (const auto *PT = dyn_cast<PointerType>(Ty)) + return PT->getPointeeType()->getAsCXXRecordDecl(); + return Ty->getAsCXXRecordDecl(); + }; + const CXXRecordDecl *BaseDecl = extractRecordDecl(BaseType); + const CXXRecordDecl *DerivedDecl = extractRecordDecl(DerivedType); + + return Ctx.collectBaseOffset(BaseDecl, DerivedDecl); +} + +/// Emit casts from a PrimType to another PrimType. +template <class Emitter> +bool Compiler<Emitter>::emitPrimCast(PrimType FromT, PrimType ToT, + QualType ToQT, const Expr *E) { + + if (FromT == PT_Float) { + // Floating to floating. + if (ToT == PT_Float) { + const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT); + return this->emitCastFP(ToSem, getRoundingMode(E), E); + } + + if (ToT == PT_IntAP) + return this->emitCastFloatingIntegralAP(Ctx.getBitWidth(ToQT), E); + if (ToT == PT_IntAPS) + return this->emitCastFloatingIntegralAPS(Ctx.getBitWidth(ToQT), E); + + // Float to integral. + if (isIntegralType(ToT) || ToT == PT_Bool) + return this->emitCastFloatingIntegral(ToT, E); + } + + if (isIntegralType(FromT) || FromT == PT_Bool) { + if (ToT == PT_IntAP) + return this->emitCastAP(FromT, Ctx.getBitWidth(ToQT), E); + if (ToT == PT_IntAPS) + return this->emitCastAPS(FromT, Ctx.getBitWidth(ToQT), E); + + // Integral to integral. + if (isIntegralType(ToT) || ToT == PT_Bool) + return FromT != ToT ? this->emitCast(FromT, ToT, E) : true; + + if (ToT == PT_Float) { + // Integral to floating. + const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT); + return this->emitCastIntegralFloating(FromT, ToSem, getRoundingMode(E), + E); + } + } + + return false; +} + +/// Emits __real(SubExpr) +template <class Emitter> +bool Compiler<Emitter>::emitComplexReal(const Expr *SubExpr) { + assert(SubExpr->getType()->isAnyComplexType()); + + if (DiscardResult) + return this->discard(SubExpr); + + if (!this->visit(SubExpr)) + return false; + if (SubExpr->isLValue()) { + if (!this->emitConstUint8(0, SubExpr)) + return false; + return this->emitArrayElemPtrPopUint8(SubExpr); + } + + // Rvalue, load the actual element. + return this->emitArrayElemPop(classifyComplexElementType(SubExpr->getType()), + 0, SubExpr); +} + +template <class Emitter> +bool Compiler<Emitter>::emitComplexBoolCast(const Expr *E) { + assert(!DiscardResult); + PrimType ElemT = classifyComplexElementType(E->getType()); + // We emit the expression (__real(E) != 0 || __imag(E) != 0) + // for us, that means (bool)E[0] || (bool)E[1] + if (!this->emitArrayElem(ElemT, 0, E)) + return false; + if (ElemT == PT_Float) { + if (!this->emitCastFloatingIntegral(PT_Bool, E)) + return false; + } else { + if (!this->emitCast(ElemT, PT_Bool, E)) + return false; + } + + // We now have the bool value of E[0] on the stack. 
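At the source level, the value being assembled here is the usual complex-to-bool conversion; a minimal sketch (hypothetical helper; _Complex, __real__ and __imag__ are GNU extensions that Clang accepts in C++):

  bool complexToBool(_Complex double z) {
    return __real__(z) != 0.0 || __imag__(z) != 0.0; // true iff either component is non-zero
  }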
+ LabelTy LabelTrue = this->getLabel(); + if (!this->jumpTrue(LabelTrue)) + return false; + + if (!this->emitArrayElemPop(ElemT, 1, E)) + return false; + if (ElemT == PT_Float) { + if (!this->emitCastFloatingIntegral(PT_Bool, E)) + return false; + } else { + if (!this->emitCast(ElemT, PT_Bool, E)) + return false; + } + // Leave the boolean value of E[1] on the stack. + LabelTy EndLabel = this->getLabel(); + this->jump(EndLabel); + + this->emitLabel(LabelTrue); + if (!this->emitPopPtr(E)) + return false; + if (!this->emitConstBool(true, E)) + return false; + + this->fallthrough(EndLabel); + this->emitLabel(EndLabel); + + return true; +} + +template <class Emitter> +bool Compiler<Emitter>::emitComplexComparison(const Expr *LHS, const Expr *RHS, + const BinaryOperator *E) { + assert(E->isComparisonOp()); + assert(!Initializing); + assert(!DiscardResult); + + PrimType ElemT; + bool LHSIsComplex; + unsigned LHSOffset; + if (LHS->getType()->isAnyComplexType()) { + LHSIsComplex = true; + ElemT = classifyComplexElementType(LHS->getType()); + LHSOffset = allocateLocalPrimitive(LHS, PT_Ptr, /*IsConst=*/true, + /*IsExtended=*/false); + if (!this->visit(LHS)) + return false; + if (!this->emitSetLocal(PT_Ptr, LHSOffset, E)) + return false; + } else { + LHSIsComplex = false; + PrimType LHST = classifyPrim(LHS->getType()); + LHSOffset = this->allocateLocalPrimitive(LHS, LHST, true, false); + if (!this->visit(LHS)) + return false; + if (!this->emitSetLocal(LHST, LHSOffset, E)) + return false; + } + + bool RHSIsComplex; + unsigned RHSOffset; + if (RHS->getType()->isAnyComplexType()) { + RHSIsComplex = true; + ElemT = classifyComplexElementType(RHS->getType()); + RHSOffset = allocateLocalPrimitive(RHS, PT_Ptr, /*IsConst=*/true, + /*IsExtended=*/false); + if (!this->visit(RHS)) + return false; + if (!this->emitSetLocal(PT_Ptr, RHSOffset, E)) + return false; + } else { + RHSIsComplex = false; + PrimType RHST = classifyPrim(RHS->getType()); + RHSOffset = this->allocateLocalPrimitive(RHS, RHST, true, false); + if (!this->visit(RHS)) + return false; + if (!this->emitSetLocal(RHST, RHSOffset, E)) + return false; + } + + auto getElem = [&](unsigned LocalOffset, unsigned Index, + bool IsComplex) -> bool { + if (IsComplex) { + if (!this->emitGetLocal(PT_Ptr, LocalOffset, E)) + return false; + return this->emitArrayElemPop(ElemT, Index, E); + } + return this->emitGetLocal(ElemT, LocalOffset, E); + }; + + for (unsigned I = 0; I != 2; ++I) { + // Get both values. + if (!getElem(LHSOffset, I, LHSIsComplex)) + return false; + if (!getElem(RHSOffset, I, RHSIsComplex)) + return false; + // And compare them. + if (!this->emitEQ(ElemT, E)) + return false; + + if (!this->emitCastBoolUint8(E)) + return false; + } + + // We now have two bool values on the stack. Compare those. + if (!this->emitAddUint8(E)) + return false; + if (!this->emitConstUint8(2, E)) + return false; + + if (E->getOpcode() == BO_EQ) { + if (!this->emitEQUint8(E)) + return false; + } else if (E->getOpcode() == BO_NE) { + if (!this->emitNEUint8(E)) + return false; + } else + return false; + + // In C, this returns an int. + if (PrimType ResT = classifyPrim(E->getType()); ResT != PT_Bool) + return this->emitCast(PT_Bool, ResT, E); + return true; +} + +/// When calling this, we have a pointer of the local-to-destroy +/// on the stack. +/// Emit destruction of record types (or arrays of record types). +template <class Emitter> +bool Compiler<Emitter>::emitRecordDestruction(const Record *R) { + assert(R); + // First, destroy all fields. 
+ for (const Record::Field &Field : llvm::reverse(R->fields())) { + const Descriptor *D = Field.Desc; + if (!D->isPrimitive() && !D->isPrimitiveArray()) { + if (!this->emitGetPtrField(Field.Offset, SourceInfo{})) + return false; + if (!this->emitDestruction(D)) + return false; + if (!this->emitPopPtr(SourceInfo{})) + return false; + } + } + + // FIXME: Unions need to be handled differently here. We don't want to + // call the destructor of its members. + + // Now emit the destructor and recurse into base classes. + if (const CXXDestructorDecl *Dtor = R->getDestructor(); + Dtor && !Dtor->isTrivial()) { + const Function *DtorFunc = getFunction(Dtor); + if (!DtorFunc) + return false; + assert(DtorFunc->hasThisPointer()); + assert(DtorFunc->getNumParams() == 1); + if (!this->emitDupPtr(SourceInfo{})) + return false; + if (!this->emitCall(DtorFunc, 0, SourceInfo{})) + return false; + } + + for (const Record::Base &Base : llvm::reverse(R->bases())) { + if (!this->emitGetPtrBase(Base.Offset, SourceInfo{})) + return false; + if (!this->emitRecordDestruction(Base.R)) + return false; + if (!this->emitPopPtr(SourceInfo{})) + return false; + } + + // FIXME: Virtual bases. + return true; +} +/// When calling this, we have a pointer of the local-to-destroy +/// on the stack. +/// Emit destruction of record types (or arrays of record types). +template <class Emitter> +bool Compiler<Emitter>::emitDestruction(const Descriptor *Desc) { + assert(Desc); + assert(!Desc->isPrimitive()); + assert(!Desc->isPrimitiveArray()); + + // Arrays. + if (Desc->isArray()) { + const Descriptor *ElemDesc = Desc->ElemDesc; + assert(ElemDesc); + + // Don't need to do anything for these. + if (ElemDesc->isPrimitiveArray()) + return true; + + // If this is an array of record types, check if we need + // to call the element destructors at all. If not, try + // to save the work. + if (const Record *ElemRecord = ElemDesc->ElemRecord) { + if (const CXXDestructorDecl *Dtor = ElemRecord->getDestructor(); + !Dtor || Dtor->isTrivial()) + return true; + } + + for (ssize_t I = Desc->getNumElems() - 1; I >= 0; --I) { + if (!this->emitConstUint64(I, SourceInfo{})) + return false; + if (!this->emitArrayElemPtrUint64(SourceInfo{})) + return false; + if (!this->emitDestruction(ElemDesc)) + return false; + if (!this->emitPopPtr(SourceInfo{})) + return false; + } + return true; + } + + assert(Desc->ElemRecord); + return this->emitRecordDestruction(Desc->ElemRecord); +} + +namespace clang { +namespace interp { + +template class Compiler<ByteCodeEmitter>; +template class Compiler<EvalEmitter>; + +} // namespace interp +} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Compiler.h b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.h new file mode 100644 index 000000000000..084f5aef25f8 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/Compiler.h @@ -0,0 +1,634 @@ +//===--- Compiler.h - Code generator for expressions -----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Defines the constexpr bytecode compiler. 
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_BYTECODEEXPRGEN_H
+#define LLVM_CLANG_AST_INTERP_BYTECODEEXPRGEN_H
+
+#include "ByteCodeEmitter.h"
+#include "EvalEmitter.h"
+#include "Pointer.h"
+#include "PrimType.h"
+#include "Record.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+
+namespace clang {
+class QualType;
+
+namespace interp {
+
+template <class Emitter> class LocalScope;
+template <class Emitter> class DestructorScope;
+template <class Emitter> class VariableScope;
+template <class Emitter> class DeclScope;
+template <class Emitter> class InitLinkScope;
+template <class Emitter> class InitStackScope;
+template <class Emitter> class OptionScope;
+template <class Emitter> class ArrayIndexScope;
+template <class Emitter> class SourceLocScope;
+template <class Emitter> class LoopScope;
+template <class Emitter> class LabelScope;
+template <class Emitter> class SwitchScope;
+template <class Emitter> class StmtExprScope;
+
+template <class Emitter> class Compiler;
+struct InitLink {
+public:
+  enum {
+    K_This = 0,
+    K_Field = 1,
+    K_Temp = 2,
+    K_Decl = 3,
+  };
+
+  static InitLink This() { return InitLink{K_This}; }
+  static InitLink Field(unsigned Offset) {
+    InitLink IL{K_Field};
+    IL.Offset = Offset;
+    return IL;
+  }
+  static InitLink Temp(unsigned Offset) {
+    InitLink IL{K_Temp};
+    IL.Offset = Offset;
+    return IL;
+  }
+  static InitLink Decl(const ValueDecl *D) {
+    InitLink IL{K_Decl};
+    IL.D = D;
+    return IL;
+  }
+
+  InitLink(uint8_t Kind) : Kind(Kind) {}
+  template <class Emitter>
+  bool emit(Compiler<Emitter> *Ctx, const Expr *E) const;
+
+  uint32_t Kind;
+  union {
+    unsigned Offset;
+    const ValueDecl *D;
+  };
+};
+
+/// State encapsulating whether the variable creation has been successful,
+/// unsuccessful, or no variable has been created at all.
+struct VarCreationState {
+  std::optional<bool> S = std::nullopt;
+  VarCreationState() = default;
+  VarCreationState(bool b) : S(b) {}
+  static VarCreationState NotCreated() { return VarCreationState(); }
+
+  operator bool() const { return S && *S; }
+  bool notCreated() const { return !S; }
+};
+
+/// Compilation context for expressions.
+template <class Emitter>
+class Compiler : public ConstStmtVisitor<Compiler<Emitter>, bool>,
+                 public Emitter {
+protected:
+  // Aliases for types defined in the emitter.
+  using LabelTy = typename Emitter::LabelTy;
+  using AddrTy = typename Emitter::AddrTy;
+  using OptLabelTy = std::optional<LabelTy>;
+  using CaseMap = llvm::DenseMap<const SwitchCase *, LabelTy>;
+
+  /// Current compilation context.
+  Context &Ctx;
+  /// Program to link to.
+  Program &P;
+
+public:
+  /// Initializes the compiler and the backend emitter.
+  template <typename... Tys>
+  Compiler(Context &Ctx, Program &P, Tys &&...Args)
+      : Emitter(Ctx, P, Args...), Ctx(Ctx), P(P) {}
+
+  // Expressions.
+ bool VisitCastExpr(const CastExpr *E); + bool VisitIntegerLiteral(const IntegerLiteral *E); + bool VisitFloatingLiteral(const FloatingLiteral *E); + bool VisitImaginaryLiteral(const ImaginaryLiteral *E); + bool VisitParenExpr(const ParenExpr *E); + bool VisitBinaryOperator(const BinaryOperator *E); + bool VisitLogicalBinOp(const BinaryOperator *E); + bool VisitPointerArithBinOp(const BinaryOperator *E); + bool VisitComplexBinOp(const BinaryOperator *E); + bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E); + bool VisitCallExpr(const CallExpr *E); + bool VisitBuiltinCallExpr(const CallExpr *E); + bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E); + bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E); + bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E); + bool VisitGNUNullExpr(const GNUNullExpr *E); + bool VisitCXXThisExpr(const CXXThisExpr *E); + bool VisitUnaryOperator(const UnaryOperator *E); + bool VisitComplexUnaryOperator(const UnaryOperator *E); + bool VisitDeclRefExpr(const DeclRefExpr *E); + bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E); + bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E); + bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E); + bool VisitInitListExpr(const InitListExpr *E); + bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E); + bool VisitConstantExpr(const ConstantExpr *E); + bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); + bool VisitMemberExpr(const MemberExpr *E); + bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E); + bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E); + bool VisitOpaqueValueExpr(const OpaqueValueExpr *E); + bool VisitAbstractConditionalOperator(const AbstractConditionalOperator *E); + bool VisitStringLiteral(const StringLiteral *E); + bool VisitObjCStringLiteral(const ObjCStringLiteral *E); + bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E); + bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E); + bool VisitCharacterLiteral(const CharacterLiteral *E); + bool VisitCompoundAssignOperator(const CompoundAssignOperator *E); + bool VisitFloatCompoundAssignOperator(const CompoundAssignOperator *E); + bool VisitPointerCompoundAssignOperator(const CompoundAssignOperator *E); + bool VisitExprWithCleanups(const ExprWithCleanups *E); + bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E); + bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E); + bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E); + bool VisitTypeTraitExpr(const TypeTraitExpr *E); + bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E); + bool VisitLambdaExpr(const LambdaExpr *E); + bool VisitPredefinedExpr(const PredefinedExpr *E); + bool VisitCXXThrowExpr(const CXXThrowExpr *E); + bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E); + bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E); + bool VisitCXXConstructExpr(const CXXConstructExpr *E); + bool VisitSourceLocExpr(const SourceLocExpr *E); + bool VisitOffsetOfExpr(const OffsetOfExpr *E); + bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E); + bool VisitSizeOfPackExpr(const SizeOfPackExpr *E); + bool VisitGenericSelectionExpr(const GenericSelectionExpr *E); + bool VisitChooseExpr(const ChooseExpr *E); + bool VisitEmbedExpr(const EmbedExpr *E); + bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E); + bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); + bool VisitExpressionTraitExpr(const 
ExpressionTraitExpr *E); + bool VisitCXXUuidofExpr(const CXXUuidofExpr *E); + bool VisitRequiresExpr(const RequiresExpr *E); + bool VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E); + bool VisitCXXRewrittenBinaryOperator(const CXXRewrittenBinaryOperator *E); + bool VisitPseudoObjectExpr(const PseudoObjectExpr *E); + bool VisitPackIndexingExpr(const PackIndexingExpr *E); + bool VisitRecoveryExpr(const RecoveryExpr *E); + bool VisitAddrLabelExpr(const AddrLabelExpr *E); + bool VisitConvertVectorExpr(const ConvertVectorExpr *E); + bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E); + bool VisitExtVectorElementExpr(const ExtVectorElementExpr *E); + bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E); + bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E); + bool VisitStmtExpr(const StmtExpr *E); + bool VisitCXXNewExpr(const CXXNewExpr *E); + bool VisitCXXDeleteExpr(const CXXDeleteExpr *E); + + // Statements. + bool visitCompoundStmt(const CompoundStmt *S); + bool visitLoopBody(const Stmt *S); + bool visitDeclStmt(const DeclStmt *DS); + bool visitReturnStmt(const ReturnStmt *RS); + bool visitIfStmt(const IfStmt *IS); + bool visitWhileStmt(const WhileStmt *S); + bool visitDoStmt(const DoStmt *S); + bool visitForStmt(const ForStmt *S); + bool visitCXXForRangeStmt(const CXXForRangeStmt *S); + bool visitBreakStmt(const BreakStmt *S); + bool visitContinueStmt(const ContinueStmt *S); + bool visitSwitchStmt(const SwitchStmt *S); + bool visitCaseStmt(const CaseStmt *S); + bool visitDefaultStmt(const DefaultStmt *S); + bool visitAttributedStmt(const AttributedStmt *S); + bool visitCXXTryStmt(const CXXTryStmt *S); + +protected: + bool visitStmt(const Stmt *S); + bool visitExpr(const Expr *E) override; + bool visitFunc(const FunctionDecl *F) override; + + bool visitDeclAndReturn(const VarDecl *VD, bool ConstantContext) override; + +protected: + /// Emits scope cleanup instructions. + void emitCleanup(); + + /// Returns a record type from a record or pointer type. + const RecordType *getRecordTy(QualType Ty); + + /// Returns a record from a record or pointer type. + Record *getRecord(QualType Ty); + Record *getRecord(const RecordDecl *RD); + + /// Returns a function for the given FunctionDecl. + /// If the function does not exist yet, it is compiled. + const Function *getFunction(const FunctionDecl *FD); + + std::optional<PrimType> classify(const Expr *E) const { + return Ctx.classify(E); + } + std::optional<PrimType> classify(QualType Ty) const { + return Ctx.classify(Ty); + } + + /// Classifies a known primitive type. + PrimType classifyPrim(QualType Ty) const { + if (auto T = classify(Ty)) { + return *T; + } + llvm_unreachable("not a primitive type"); + } + /// Classifies a known primitive expression. + PrimType classifyPrim(const Expr *E) const { + if (auto T = classify(E)) + return *T; + llvm_unreachable("not a primitive type"); + } + + /// Evaluates an expression and places the result on the stack. If the + /// expression is of composite type, a local variable will be created + /// and a pointer to said variable will be placed on the stack. + bool visit(const Expr *E); + /// Compiles an initializer. This is like visit() but it will never + /// create a variable and instead rely on a variable already having + /// been created. visitInitializer() then relies on a pointer to this + /// variable being on top of the stack. + bool visitInitializer(const Expr *E); + /// Evaluates an expression for side effects and discards the result. 
+ bool discard(const Expr *E); + /// Just pass evaluation on to \p E. This leaves all the parsing flags + /// intact. + bool delegate(const Expr *E); + /// Creates and initializes a variable from the given decl. + VarCreationState visitVarDecl(const VarDecl *VD, bool Toplevel = false); + VarCreationState visitDecl(const VarDecl *VD); + /// Visit an APValue. + bool visitAPValue(const APValue &Val, PrimType ValType, const Expr *E); + bool visitAPValueInitializer(const APValue &Val, const Expr *E); + /// Visit the given decl as if we have a reference to it. + bool visitDeclRef(const ValueDecl *D, const Expr *E); + + /// Visits an expression and converts it to a boolean. + bool visitBool(const Expr *E); + + bool visitInitList(ArrayRef<const Expr *> Inits, const Expr *ArrayFiller, + const Expr *E); + bool visitArrayElemInit(unsigned ElemIndex, const Expr *Init); + + /// Creates a local primitive value. + unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst, + bool IsExtended = false); + + /// Allocates a space storing a local given its type. + std::optional<unsigned> + allocateLocal(DeclTy &&Decl, const ValueDecl *ExtendingDecl = nullptr); + +private: + friend class VariableScope<Emitter>; + friend class LocalScope<Emitter>; + friend class DestructorScope<Emitter>; + friend class DeclScope<Emitter>; + friend class InitLinkScope<Emitter>; + friend class InitStackScope<Emitter>; + friend class OptionScope<Emitter>; + friend class ArrayIndexScope<Emitter>; + friend class SourceLocScope<Emitter>; + friend struct InitLink; + friend class LoopScope<Emitter>; + friend class LabelScope<Emitter>; + friend class SwitchScope<Emitter>; + friend class StmtExprScope<Emitter>; + + /// Emits a zero initializer. + bool visitZeroInitializer(PrimType T, QualType QT, const Expr *E); + bool visitZeroRecordInitializer(const Record *R, const Expr *E); + + /// Emits an APSInt constant. + bool emitConst(const llvm::APSInt &Value, PrimType Ty, const Expr *E); + bool emitConst(const llvm::APSInt &Value, const Expr *E); + bool emitConst(const llvm::APInt &Value, const Expr *E) { + return emitConst(static_cast<llvm::APSInt>(Value), E); + } + + /// Emits an integer constant. + template <typename T> bool emitConst(T Value, PrimType Ty, const Expr *E); + template <typename T> bool emitConst(T Value, const Expr *E); + + llvm::RoundingMode getRoundingMode(const Expr *E) const { + FPOptions FPO = E->getFPFeaturesInEffect(Ctx.getLangOpts()); + + if (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) + return llvm::RoundingMode::NearestTiesToEven; + + return FPO.getRoundingMode(); + } + + bool emitPrimCast(PrimType FromT, PrimType ToT, QualType ToQT, const Expr *E); + PrimType classifyComplexElementType(QualType T) const { + assert(T->isAnyComplexType()); + + QualType ElemType = T->getAs<ComplexType>()->getElementType(); + + return *this->classify(ElemType); + } + + bool emitComplexReal(const Expr *SubExpr); + bool emitComplexBoolCast(const Expr *E); + bool emitComplexComparison(const Expr *LHS, const Expr *RHS, + const BinaryOperator *E); + + bool emitRecordDestruction(const Record *R); + bool emitDestruction(const Descriptor *Desc); + unsigned collectBaseOffset(const QualType BaseType, + const QualType DerivedType); + bool emitLambdaStaticInvokerBody(const CXXMethodDecl *MD); + +protected: + /// Variable to storage mapping. + llvm::DenseMap<const ValueDecl *, Scope::Local> Locals; + + /// OpaqueValueExpr to location mapping. 
+  llvm::DenseMap<const OpaqueValueExpr *, unsigned> OpaqueExprs;
+
+  /// Current scope.
+  VariableScope<Emitter> *VarScope = nullptr;
+
+  /// Current argument index. Needed to emit ArrayInitIndexExpr.
+  std::optional<uint64_t> ArrayIndex;
+
+  /// DefaultInit- or DefaultArgExpr, needed for SourceLocExpr.
+  const Expr *SourceLocDefaultExpr = nullptr;
+
+  /// Flag indicating if return value is to be discarded.
+  bool DiscardResult = false;
+
+  bool InStmtExpr = false;
+
+  /// Flag indicating if we're initializing an already created
+  /// variable. This is set in visitInitializer().
+  bool Initializing = false;
+  const ValueDecl *InitializingDecl = nullptr;
+
+  llvm::SmallVector<InitLink> InitStack;
+  bool InitStackActive = false;
+
+  /// Flag indicating if we're initializing a global variable.
+  bool GlobalDecl = false;
+
+  /// Type of the expression returned by the function.
+  std::optional<PrimType> ReturnType;
+
+  /// Switch case mapping.
+  CaseMap CaseLabels;
+
+  /// Point to break to.
+  OptLabelTy BreakLabel;
+  /// Point to continue to.
+  OptLabelTy ContinueLabel;
+  /// Default case label.
+  OptLabelTy DefaultLabel;
+};
+
+extern template class Compiler<ByteCodeEmitter>;
+extern template class Compiler<EvalEmitter>;
+
+/// Scope chain managing the variable lifetimes.
+template <class Emitter> class VariableScope {
+public:
+  VariableScope(Compiler<Emitter> *Ctx, const ValueDecl *VD)
+      : Ctx(Ctx), Parent(Ctx->VarScope), ValDecl(VD) {
+    Ctx->VarScope = this;
+  }
+
+  virtual ~VariableScope() { Ctx->VarScope = this->Parent; }
+
+  void add(const Scope::Local &Local, bool IsExtended) {
+    if (IsExtended)
+      this->addExtended(Local);
+    else
+      this->addLocal(Local);
+  }
+
+  virtual void addLocal(const Scope::Local &Local) {
+    if (this->Parent)
+      this->Parent->addLocal(Local);
+  }
+
+  virtual void addExtended(const Scope::Local &Local) {
+    if (this->Parent)
+      this->Parent->addExtended(Local);
+  }
+
+  void addExtended(const Scope::Local &Local, const ValueDecl *ExtendingDecl) {
+    // Walk up the chain of scopes until we find the one for ExtendingDecl.
+    // If there is no such scope, attach it to the parent one.
+    VariableScope *P = this;
+    while (P) {
+      if (P->ValDecl == ExtendingDecl) {
+        P->addLocal(Local);
+        return;
+      }
+      P = P->Parent;
+      if (!P)
+        break;
+    }
+
+    // Use the parent scope.
+    addExtended(Local);
+  }
+
+  virtual void emitDestruction() {}
+  virtual bool emitDestructors() { return true; }
+  VariableScope *getParent() const { return Parent; }
+
+protected:
+  /// Compiler instance.
+  Compiler<Emitter> *Ctx;
+  /// Link to the parent scope.
+  VariableScope *Parent;
+  const ValueDecl *ValDecl = nullptr;
+};
+
+/// Generic scope for local variables.
+template <class Emitter> class LocalScope : public VariableScope<Emitter> {
+public:
+  LocalScope(Compiler<Emitter> *Ctx) : VariableScope<Emitter>(Ctx, nullptr) {}
+  LocalScope(Compiler<Emitter> *Ctx, const ValueDecl *VD)
+      : VariableScope<Emitter>(Ctx, VD) {}
+
+  /// Emit a Destroy op for this scope.
+  ~LocalScope() override {
+    if (!Idx)
+      return;
+    this->Ctx->emitDestroy(*Idx, SourceInfo{});
+    removeStoredOpaqueValues();
+  }
+
+  /// Overridden to support explicit destruction.
+  void emitDestruction() override { destroyLocals(); }
+
+  /// Explicit destruction of local variables.
+  bool destroyLocals() {
+    if (!Idx)
+      return true;
+
+    bool Success = this->emitDestructors();
+    this->Ctx->emitDestroy(*Idx, SourceInfo{});
+    removeStoredOpaqueValues();
+    this->Idx = std::nullopt;
+    return Success;
+  }
+
+  void addLocal(const Scope::Local &Local) override {
+    if (!Idx) {
+      Idx = this->Ctx->Descriptors.size();
+      this->Ctx->Descriptors.emplace_back();
+    }
+
+    this->Ctx->Descriptors[*Idx].emplace_back(Local);
+  }
+
+  bool emitDestructors() override {
+    if (!Idx)
+      return true;
+    // Emit destructor calls for local variables of record
+    // type with a destructor.
+    for (Scope::Local &Local : this->Ctx->Descriptors[*Idx]) {
+      if (!Local.Desc->isPrimitive() && !Local.Desc->isPrimitiveArray()) {
+        if (!this->Ctx->emitGetPtrLocal(Local.Offset, SourceInfo{}))
+          return false;
+
+        if (!this->Ctx->emitDestruction(Local.Desc))
+          return false;
+
+        if (!this->Ctx->emitPopPtr(SourceInfo{}))
+          return false;
+        removeIfStoredOpaqueValue(Local);
+      }
+    }
+    return true;
+  }
+
+  void removeStoredOpaqueValues() {
+    if (!Idx)
+      return;
+
+    for (const Scope::Local &Local : this->Ctx->Descriptors[*Idx]) {
+      removeIfStoredOpaqueValue(Local);
+    }
+  }
+
+  void removeIfStoredOpaqueValue(const Scope::Local &Local) {
+    if (const auto *OVE =
+            llvm::dyn_cast_if_present<OpaqueValueExpr>(Local.Desc->asExpr())) {
+      if (auto It = this->Ctx->OpaqueExprs.find(OVE);
+          It != this->Ctx->OpaqueExprs.end())
+        this->Ctx->OpaqueExprs.erase(It);
+    };
+  }
+
+  /// Index of the scope in the chain.
+  std::optional<unsigned> Idx;
+};
+
+/// Emits the destructors of the variables of \param OtherScope
+/// when this scope is destroyed. Does not create a Scope in the bytecode at
+/// all, this is just a RAII object to emit destructors.
+template <class Emitter> class DestructorScope final {
+public:
+  DestructorScope(LocalScope<Emitter> &OtherScope) : OtherScope(OtherScope) {}
+
+  ~DestructorScope() { OtherScope.emitDestructors(); }
+
+private:
+  LocalScope<Emitter> &OtherScope;
+};
+
+/// Scope for storage declared in a compound statement.
+template <class Emitter> class BlockScope final : public LocalScope<Emitter> {
+public:
+  BlockScope(Compiler<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {}
+
+  void addExtended(const Scope::Local &Local) override {
+    // If we get to this point, just add the variable as a normal local
+    // variable. It will be destroyed at the end of the block just
+    // like all others.
+    this->addLocal(Local);
+  }
+};
+
+template <class Emitter> class ArrayIndexScope final {
+public:
+  ArrayIndexScope(Compiler<Emitter> *Ctx, uint64_t Index) : Ctx(Ctx) {
+    OldArrayIndex = Ctx->ArrayIndex;
+    Ctx->ArrayIndex = Index;
+  }
+
+  ~ArrayIndexScope() { Ctx->ArrayIndex = OldArrayIndex; }
+
+private:
+  Compiler<Emitter> *Ctx;
+  std::optional<uint64_t> OldArrayIndex;
+};
+
+template <class Emitter> class SourceLocScope final {
+public:
+  SourceLocScope(Compiler<Emitter> *Ctx, const Expr *DefaultExpr) : Ctx(Ctx) {
+    assert(DefaultExpr);
+    // We only switch if the current SourceLocDefaultExpr is null.
+ if (!Ctx->SourceLocDefaultExpr) { + Enabled = true; + Ctx->SourceLocDefaultExpr = DefaultExpr; + } + } + + ~SourceLocScope() { + if (Enabled) + Ctx->SourceLocDefaultExpr = nullptr; + } + +private: + Compiler<Emitter> *Ctx; + bool Enabled = false; +}; + +template <class Emitter> class InitLinkScope final { +public: + InitLinkScope(Compiler<Emitter> *Ctx, InitLink &&Link) : Ctx(Ctx) { + Ctx->InitStack.push_back(std::move(Link)); + } + + ~InitLinkScope() { this->Ctx->InitStack.pop_back(); } + +private: + Compiler<Emitter> *Ctx; +}; + +template <class Emitter> class InitStackScope final { +public: + InitStackScope(Compiler<Emitter> *Ctx, bool Active) + : Ctx(Ctx), OldValue(Ctx->InitStackActive) { + Ctx->InitStackActive = Active; + } + + ~InitStackScope() { this->Ctx->InitStackActive = OldValue; } + +private: + Compiler<Emitter> *Ctx; + bool OldValue; +}; + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp index 3bfcdfcd4c58..b5e992c5a9ac 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp @@ -8,8 +8,7 @@ #include "Context.h" #include "ByteCodeEmitter.h" -#include "ByteCodeExprGen.h" -#include "ByteCodeStmtGen.h" +#include "Compiler.h" #include "EvalEmitter.h" #include "Interp.h" #include "InterpFrame.h" @@ -27,46 +26,112 @@ Context::Context(ASTContext &Ctx) : Ctx(Ctx), P(new Program(*this)) {} Context::~Context() {} bool Context::isPotentialConstantExpr(State &Parent, const FunctionDecl *FD) { + assert(Stk.empty()); Function *Func = P->getFunction(FD); - if (!Func) { - if (auto R = ByteCodeStmtGen<ByteCodeEmitter>(*this, *P).compileFunc(FD)) { - Func = *R; - } else { - handleAllErrors(R.takeError(), [&Parent](ByteCodeGenError &Err) { - Parent.FFDiag(Err.getLoc(), diag::err_experimental_clang_interp_failed); - }); - return false; - } - } + if (!Func || !Func->hasBody()) + Func = Compiler<ByteCodeEmitter>(*this, *P).compileFunc(FD); - if (!Func->isConstexpr()) + if (!Func) return false; - APValue Dummy; - return Run(Parent, Func, Dummy); + APValue DummyResult; + if (!Run(Parent, Func, DummyResult)) + return false; + + return Func->isConstexpr(); } bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) { - ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result); - return Check(Parent, C.interpretExpr(E)); + ++EvalID; + bool Recursing = !Stk.empty(); + Compiler<EvalEmitter> C(*this, *P, Parent, Stk); + + auto Res = C.interpretExpr(E, /*ConvertResultToRValue=*/E->isGLValue()); + + if (Res.isInvalid()) { + C.cleanup(); + Stk.clear(); + return false; + } + + if (!Recursing) { + assert(Stk.empty()); +#ifndef NDEBUG + // Make sure we don't rely on some value being still alive in + // InterpStack memory. + Stk.clear(); +#endif + } + + Result = Res.toAPValue(); + + return true; +} + +bool Context::evaluate(State &Parent, const Expr *E, APValue &Result) { + ++EvalID; + bool Recursing = !Stk.empty(); + Compiler<EvalEmitter> C(*this, *P, Parent, Stk); + + auto Res = C.interpretExpr(E); + if (Res.isInvalid()) { + C.cleanup(); + Stk.clear(); + return false; + } + + if (!Recursing) { + assert(Stk.empty()); +#ifndef NDEBUG + // Make sure we don't rely on some value being still alive in + // InterpStack memory. 
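All three evaluation entry points in Context.cpp share this shape: bump the evaluation ID, compile with a Compiler<EvalEmitter>, and on an invalid result clean up the emitter and clear the shared interpreter stack; only the outermost, non-recursing call may assert that the stack ends up empty. Below is a compressed sketch of that control flow using placeholder types, not the real EvaluationResult/InterpStack API.

#include <cassert>
#include <vector>

// Placeholder result/stack types for the sketch.
struct Result { bool Invalid = false; int Value = 0; };
using Stack = std::vector<int>;

// Hypothetical interpret step: may leave temporaries on the shared stack.
static Result interpret(Stack &Stk, int Input) {
  if (Input < 0) { Stk.push_back(Input); return {true, 0}; }
  return {false, Input * 2};
}

// Mirrors the evaluate()/evaluateAsRValue() cleanup discipline.
static bool evaluate(Stack &Stk, int Input, int &Out) {
  bool Recursing = !Stk.empty();   // Re-entered from a running evaluation?
  Result Res = interpret(Stk, Input);
  if (Res.Invalid) {
    Stk.clear();                   // Failed: drop any leftover temporaries.
    return false;
  }
  if (!Recursing)
    assert(Stk.empty());           // Outermost call must leave a clean stack.
  Out = Res.Value;
  return true;
}

int main() {
  Stack Stk;
  int Out = 0;
  bool Ok = evaluate(Stk, 21, Out);
  assert(Ok && Out == 42);
  bool Failed = !evaluate(Stk, -1, Out); // Failure path clears the stack.
  assert(Failed && Stk.empty());
  (void)Ok; (void)Failed;
  return 0;
}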
+ Stk.clear(); +#endif + } + + Result = Res.toAPValue(); + return true; } bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD, APValue &Result) { - ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result); - return Check(Parent, C.interpretDecl(VD)); -} + ++EvalID; + bool Recursing = !Stk.empty(); + Compiler<EvalEmitter> C(*this, *P, Parent, Stk); -const LangOptions &Context::getLangOpts() const { return Ctx.getLangOpts(); } + bool CheckGlobalInitialized = + shouldBeGloballyIndexed(VD) && + (VD->getType()->isRecordType() || VD->getType()->isArrayType()); + auto Res = C.interpretDecl(VD, CheckGlobalInitialized); + if (Res.isInvalid()) { + C.cleanup(); + Stk.clear(); + return false; + } -llvm::Optional<PrimType> Context::classify(QualType T) { - if (T->isReferenceType() || T->isPointerType()) { - return PT_Ptr; + if (!Recursing) { + assert(Stk.empty()); +#ifndef NDEBUG + // Make sure we don't rely on some value being still alive in + // InterpStack memory. + Stk.clear(); +#endif } + Result = Res.toAPValue(); + return true; +} + +const LangOptions &Context::getLangOpts() const { return Ctx.getLangOpts(); } + +std::optional<PrimType> Context::classify(QualType T) const { if (T->isBooleanType()) return PT_Bool; + // We map these to primitive arrays. + if (T->isAnyComplexType() || T->isVectorType()) + return std::nullopt; + if (T->isSignedIntegerOrEnumerationType()) { switch (Ctx.getIntWidth(T)) { case 64: @@ -78,7 +143,7 @@ llvm::Optional<PrimType> Context::classify(QualType T) { case 8: return PT_Sint8; default: - return {}; + return PT_IntAPS; } } @@ -93,37 +158,152 @@ llvm::Optional<PrimType> Context::classify(QualType T) { case 8: return PT_Uint8; default: - return {}; + return PT_IntAP; } } if (T->isNullPtrType()) return PT_Ptr; - if (auto *AT = dyn_cast<AtomicType>(T)) + if (T->isFloatingType()) + return PT_Float; + + if (T->isSpecificBuiltinType(BuiltinType::BoundMember) || + T->isMemberPointerType()) + return PT_MemberPtr; + + if (T->isFunctionPointerType() || T->isFunctionReferenceType() || + T->isFunctionType()) + return PT_FnPtr; + + if (T->isReferenceType() || T->isPointerType() || + T->isObjCObjectPointerType()) + return PT_Ptr; + + if (const auto *AT = T->getAs<AtomicType>()) return classify(AT->getValueType()); - return {}; + if (const auto *DT = dyn_cast<DecltypeType>(T)) + return classify(DT->getUnderlyingType()); + + return std::nullopt; } unsigned Context::getCharBit() const { return Ctx.getTargetInfo().getCharWidth(); } -bool Context::Run(State &Parent, Function *Func, APValue &Result) { - InterpState State(Parent, *P, Stk, *this); - State.Current = new InterpFrame(State, Func, nullptr, {}, {}); - if (Interpret(State, Result)) - return true; +/// Simple wrapper around getFloatTypeSemantics() to make code a +/// little shorter. +const llvm::fltSemantics &Context::getFloatSemantics(QualType T) const { + return Ctx.getFloatTypeSemantics(T); +} + +bool Context::Run(State &Parent, const Function *Func, APValue &Result) { + + { + InterpState State(Parent, *P, Stk, *this); + State.Current = new InterpFrame(State, Func, /*Caller=*/nullptr, CodePtr(), + Func->getArgSize()); + if (Interpret(State, Result)) { + assert(Stk.empty()); + return true; + } + + // State gets destroyed here, so the Stk.clear() below doesn't accidentally + // remove values the State's destructor might access. 
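classify() now chooses a fixed-width primitive from the signedness and bit width of the type and, instead of giving up, falls back to the arbitrary-precision kinds (PT_IntAP/PT_IntAPS) for widths that have no fixed-size representation. A table-style sketch of that mapping, with a local enum standing in for the real PrimType:

#include <cassert>

// Local stand-in for interp::PrimType; names chosen to mirror the mapping.
enum class Prim { S8, U8, S16, U16, S32, U32, S64, U64, IntAP, IntAPS };

// Maps (signedness, bit width) to a primitive, with an arbitrary-precision
// fallback for unusual widths (for example a _BitInt(37)).
static Prim classifyInt(bool IsSigned, unsigned Bits) {
  switch (Bits) {
  case 8:  return IsSigned ? Prim::S8  : Prim::U8;
  case 16: return IsSigned ? Prim::S16 : Prim::U16;
  case 32: return IsSigned ? Prim::S32 : Prim::U32;
  case 64: return IsSigned ? Prim::S64 : Prim::U64;
  default: return IsSigned ? Prim::IntAPS : Prim::IntAP;
  }
}

int main() {
  assert(classifyInt(true, 32) == Prim::S32);    // e.g. 'int' on most targets
  assert(classifyInt(false, 64) == Prim::U64);   // e.g. 'unsigned long long'
  assert(classifyInt(true, 37) == Prim::IntAPS); // no fixed-width primitive
  return 0;
}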
+ } + Stk.clear(); return false; } -bool Context::Check(State &Parent, llvm::Expected<bool> &&Flag) { - if (Flag) - return *Flag; - handleAllErrors(Flag.takeError(), [&Parent](ByteCodeGenError &Err) { - Parent.FFDiag(Err.getLoc(), diag::err_experimental_clang_interp_failed); - }); - return false; +// TODO: Virtual bases? +const CXXMethodDecl * +Context::getOverridingFunction(const CXXRecordDecl *DynamicDecl, + const CXXRecordDecl *StaticDecl, + const CXXMethodDecl *InitialFunction) const { + assert(DynamicDecl); + assert(StaticDecl); + assert(InitialFunction); + + const CXXRecordDecl *CurRecord = DynamicDecl; + const CXXMethodDecl *FoundFunction = InitialFunction; + for (;;) { + const CXXMethodDecl *Overrider = + FoundFunction->getCorrespondingMethodDeclaredInClass(CurRecord, false); + if (Overrider) + return Overrider; + + // Common case of only one base class. + if (CurRecord->getNumBases() == 1) { + CurRecord = CurRecord->bases_begin()->getType()->getAsCXXRecordDecl(); + continue; + } + + // Otherwise, go to the base class that will lead to the StaticDecl. + for (const CXXBaseSpecifier &Spec : CurRecord->bases()) { + const CXXRecordDecl *Base = Spec.getType()->getAsCXXRecordDecl(); + if (Base == StaticDecl || Base->isDerivedFrom(StaticDecl)) { + CurRecord = Base; + break; + } + } + } + + llvm_unreachable( + "Couldn't find an overriding function in the class hierarchy?"); + return nullptr; +} + +const Function *Context::getOrCreateFunction(const FunctionDecl *FD) { + assert(FD); + const Function *Func = P->getFunction(FD); + bool IsBeingCompiled = Func && Func->isDefined() && !Func->isFullyCompiled(); + bool WasNotDefined = Func && !Func->isConstexpr() && !Func->isDefined(); + + if (IsBeingCompiled) + return Func; + + if (!Func || WasNotDefined) { + if (auto F = Compiler<ByteCodeEmitter>(*this, *P).compileFunc(FD)) + Func = F; + } + + return Func; +} + +unsigned Context::collectBaseOffset(const RecordDecl *BaseDecl, + const RecordDecl *DerivedDecl) const { + assert(BaseDecl); + assert(DerivedDecl); + const auto *FinalDecl = cast<CXXRecordDecl>(BaseDecl); + const RecordDecl *CurDecl = DerivedDecl; + const Record *CurRecord = P->getOrCreateRecord(CurDecl); + assert(CurDecl && FinalDecl); + + unsigned OffsetSum = 0; + for (;;) { + assert(CurRecord->getNumBases() > 0); + // One level up + for (const Record::Base &B : CurRecord->bases()) { + const auto *BaseDecl = cast<CXXRecordDecl>(B.Decl); + + if (BaseDecl == FinalDecl || BaseDecl->isDerivedFrom(FinalDecl)) { + OffsetSum += B.Offset; + CurRecord = B.R; + CurDecl = BaseDecl; + break; + } + } + if (CurDecl == FinalDecl) + break; + } + + assert(OffsetSum > 0); + return OffsetSum; +} + +const Record *Context::getRecord(const RecordDecl *D) const { + return P->getOrCreateRecord(D); } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.h b/contrib/llvm-project/clang/lib/AST/Interp/Context.h index e8238eea716a..b8ea4ad6b3b4 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Context.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.h @@ -17,15 +17,13 @@ #define LLVM_CLANG_AST_INTERP_CONTEXT_H #include "InterpStack.h" -#include "clang/AST/APValue.h" -#include "llvm/ADT/PointerIntPair.h" namespace clang { class ASTContext; class LangOptions; -class Stmt; class FunctionDecl; class VarDecl; +class APValue; namespace interp { class Function; @@ -33,8 +31,13 @@ class Program; class State; enum PrimType : unsigned; +struct ParamOffset { + unsigned Offset; + bool IsPtr; +}; + /// Holds all information required to evaluate constexpr 
code in a module. -class Context { +class Context final { public: /// Initialises the constexpr VM. Context(ASTContext &Ctx); @@ -48,6 +51,9 @@ public: /// Evaluates a toplevel expression as an rvalue. bool evaluateAsRValue(State &Parent, const Expr *E, APValue &Result); + /// Like evaluateAsRvalue(), but does no implicit lvalue-to-rvalue conversion. + bool evaluate(State &Parent, const Expr *E, APValue &Result); + /// Evaluates a toplevel initializer. bool evaluateAsInitializer(State &Parent, const VarDecl *VD, APValue &Result); @@ -59,24 +65,64 @@ public: InterpStack &getStack() { return Stk; } /// Returns CHAR_BIT. unsigned getCharBit() const; + /// Return the floating-point semantics for T. + const llvm::fltSemantics &getFloatSemantics(QualType T) const; + /// Return the size of T in bits. + uint32_t getBitWidth(QualType T) const { return Ctx.getIntWidth(T); } + + /// Classifies a type. + std::optional<PrimType> classify(QualType T) const; /// Classifies an expression. - llvm::Optional<PrimType> classify(QualType T); + std::optional<PrimType> classify(const Expr *E) const { + assert(E); + if (E->isGLValue()) { + if (E->getType()->isFunctionType()) + return PT_FnPtr; + return PT_Ptr; + } -private: - /// Runs a function. - bool Run(State &Parent, Function *Func, APValue &Result); + return classify(E->getType()); + } + + const CXXMethodDecl * + getOverridingFunction(const CXXRecordDecl *DynamicDecl, + const CXXRecordDecl *StaticDecl, + const CXXMethodDecl *InitialFunction) const; + + const Function *getOrCreateFunction(const FunctionDecl *FD); - /// Checks a result fromt the interpreter. - bool Check(State &Parent, llvm::Expected<bool> &&R); + /// Returns whether we should create a global variable for the + /// given ValueDecl. + static bool shouldBeGloballyIndexed(const ValueDecl *VD) { + if (const auto *V = dyn_cast<VarDecl>(VD)) + return V->hasGlobalStorage() || V->isConstexpr(); + + return false; + } + + /// Returns the program. This is only needed for unittests. + Program &getProgram() const { return *P.get(); } + + unsigned collectBaseOffset(const RecordDecl *BaseDecl, + const RecordDecl *DerivedDecl) const; + + const Record *getRecord(const RecordDecl *D) const; + + unsigned getEvalID() const { return EvalID; } private: + /// Runs a function. + bool Run(State &Parent, const Function *Func, APValue &Result); + /// Current compilation context. ASTContext &Ctx; /// Interpreter stack, shared across invocations. InterpStack Stk; /// Constexpr program. std::unique_ptr<Program> P; + /// ID identifying an evaluation. 
+ unsigned EvalID = 0; }; } // namespace interp diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp index 5c1a8a9cf306..4f7e9eac76a3 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp @@ -7,6 +7,11 @@ //===----------------------------------------------------------------------===// #include "Descriptor.h" +#include "Boolean.h" +#include "Floating.h" +#include "FunctionPointer.h" +#include "IntegralAP.h" +#include "MemberPointer.h" #include "Pointer.h" #include "PrimType.h" #include "Record.h" @@ -15,46 +20,69 @@ using namespace clang; using namespace clang::interp; template <typename T> -static void ctorTy(Block *, char *Ptr, bool, bool, bool, Descriptor *) { +static void ctorTy(Block *, std::byte *Ptr, bool, bool, bool, + const Descriptor *) { new (Ptr) T(); } -template <typename T> static void dtorTy(Block *, char *Ptr, Descriptor *) { +template <typename T> +static void dtorTy(Block *, std::byte *Ptr, const Descriptor *) { reinterpret_cast<T *>(Ptr)->~T(); } template <typename T> -static void moveTy(Block *, char *Src, char *Dst, Descriptor *) { - auto *SrcPtr = reinterpret_cast<T *>(Src); +static void moveTy(Block *, const std::byte *Src, std::byte *Dst, + const Descriptor *) { + const auto *SrcPtr = reinterpret_cast<const T *>(Src); auto *DstPtr = reinterpret_cast<T *>(Dst); new (DstPtr) T(std::move(*SrcPtr)); } template <typename T> -static void ctorArrayTy(Block *, char *Ptr, bool, bool, bool, Descriptor *D) { +static void ctorArrayTy(Block *, std::byte *Ptr, bool, bool, bool, + const Descriptor *D) { + new (Ptr) InitMapPtr(std::nullopt); + + Ptr += sizeof(InitMapPtr); for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) { new (&reinterpret_cast<T *>(Ptr)[I]) T(); } } template <typename T> -static void dtorArrayTy(Block *, char *Ptr, Descriptor *D) { +static void dtorArrayTy(Block *, std::byte *Ptr, const Descriptor *D) { + InitMapPtr &IMP = *reinterpret_cast<InitMapPtr *>(Ptr); + + if (IMP) + IMP = std::nullopt; + Ptr += sizeof(InitMapPtr); for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) { reinterpret_cast<T *>(Ptr)[I].~T(); } } template <typename T> -static void moveArrayTy(Block *, char *Src, char *Dst, Descriptor *D) { +static void moveArrayTy(Block *, const std::byte *Src, std::byte *Dst, + const Descriptor *D) { + // FIXME: Get rid of the const_cast. + InitMapPtr &SrcIMP = + *reinterpret_cast<InitMapPtr *>(const_cast<std::byte *>(Src)); + if (SrcIMP) { + // We only ever invoke the moveFunc when moving block contents to a + // DeadBlock. DeadBlocks don't need InitMaps, so we destroy them here. 
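Every primitive-array hook above relies on one layout rule: the block data starts with an InitMapPtr slot and the elements follow it, so the ctor, dtor and move hooks all advance the raw pointer by sizeof(InitMapPtr) before touching elements. A small self-contained illustration of that header-then-elements layout; Header and Elem here are assumptions for the example, not the real InitMapPtr/Descriptor machinery.

#include <cstddef>
#include <iostream>
#include <memory>
#include <new>

// Illustrative header that precedes the elements of an array block.
// Kept pointer-sized so the elements after it stay suitably aligned.
struct Header { std::size_t InitializedCount = 0; };
using Elem = double;

int main() {
  constexpr std::size_t NumElems = 4;
  // One raw allocation: header first, elements after it.
  auto Raw = std::make_unique<std::byte[]>(sizeof(Header) +
                                           NumElems * sizeof(Elem));
  std::byte *Ptr = Raw.get();

  // "ctor" hook: construct the header, then skip past it to the elements.
  new (Ptr) Header();
  Ptr += sizeof(Header);
  for (std::size_t I = 0; I != NumElems; ++I)
    new (&reinterpret_cast<Elem *>(Ptr)[I]) Elem(I * 1.5);

  std::cout << reinterpret_cast<Elem *>(Ptr)[2] << "\n"; // prints 3

  // The "dtor" hook mirrors the same offset arithmetic.
  for (std::size_t I = 0; I != NumElems; ++I)
    reinterpret_cast<Elem *>(Ptr)[I].~Elem();
  reinterpret_cast<Header *>(Raw.get())->~Header();
  return 0;
}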
+ SrcIMP = std::nullopt; + } + Src += sizeof(InitMapPtr); + Dst += sizeof(InitMapPtr); for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) { - auto *SrcPtr = &reinterpret_cast<T *>(Src)[I]; + const auto *SrcPtr = &reinterpret_cast<const T *>(Src)[I]; auto *DstPtr = &reinterpret_cast<T *>(Dst)[I]; new (DstPtr) T(std::move(*SrcPtr)); } } -static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable, - bool IsActive, Descriptor *D) { +static void ctorArrayDesc(Block *B, std::byte *Ptr, bool IsConst, + bool IsMutable, bool IsActive, const Descriptor *D) { const unsigned NumElems = D->getNumElems(); const unsigned ElemSize = D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor); @@ -63,7 +91,7 @@ static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable, for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) { auto *ElemPtr = Ptr + ElemOffset; auto *Desc = reinterpret_cast<InlineDescriptor *>(ElemPtr); - auto *ElemLoc = reinterpret_cast<char *>(Desc + 1); + auto *ElemLoc = reinterpret_cast<std::byte *>(Desc + 1); auto *SD = D->ElemDesc; Desc->Offset = ElemOffset + sizeof(InlineDescriptor); @@ -72,13 +100,14 @@ static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable, Desc->IsBase = false; Desc->IsActive = IsActive; Desc->IsConst = IsConst || D->IsConst; - Desc->IsMutable = IsMutable || D->IsMutable; + Desc->IsFieldMutable = IsMutable || D->IsMutable; if (auto Fn = D->ElemDesc->CtorFn) - Fn(B, ElemLoc, Desc->IsConst, Desc->IsMutable, IsActive, D->ElemDesc); + Fn(B, ElemLoc, Desc->IsConst, Desc->IsFieldMutable, IsActive, + D->ElemDesc); } } -static void dtorArrayDesc(Block *B, char *Ptr, Descriptor *D) { +static void dtorArrayDesc(Block *B, std::byte *Ptr, const Descriptor *D) { const unsigned NumElems = D->getNumElems(); const unsigned ElemSize = D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor); @@ -87,26 +116,27 @@ static void dtorArrayDesc(Block *B, char *Ptr, Descriptor *D) { for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) { auto *ElemPtr = Ptr + ElemOffset; auto *Desc = reinterpret_cast<InlineDescriptor *>(ElemPtr); - auto *ElemLoc = reinterpret_cast<char *>(Desc + 1); + auto *ElemLoc = reinterpret_cast<std::byte *>(Desc + 1); if (auto Fn = D->ElemDesc->DtorFn) Fn(B, ElemLoc, D->ElemDesc); } } -static void moveArrayDesc(Block *B, char *Src, char *Dst, Descriptor *D) { +static void moveArrayDesc(Block *B, const std::byte *Src, std::byte *Dst, + const Descriptor *D) { const unsigned NumElems = D->getNumElems(); const unsigned ElemSize = D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor); unsigned ElemOffset = 0; for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) { - auto *SrcPtr = Src + ElemOffset; + const auto *SrcPtr = Src + ElemOffset; auto *DstPtr = Dst + ElemOffset; - auto *SrcDesc = reinterpret_cast<InlineDescriptor *>(SrcPtr); - auto *SrcElemLoc = reinterpret_cast<char *>(SrcDesc + 1); + const auto *SrcDesc = reinterpret_cast<const InlineDescriptor *>(SrcPtr); + const auto *SrcElemLoc = reinterpret_cast<const std::byte *>(SrcDesc + 1); auto *DstDesc = reinterpret_cast<InlineDescriptor *>(DstPtr); - auto *DstElemLoc = reinterpret_cast<char *>(DstDesc + 1); + auto *DstElemLoc = reinterpret_cast<std::byte *>(DstDesc + 1); *DstDesc = *SrcDesc; if (auto Fn = D->ElemDesc->MoveFn) @@ -114,58 +144,122 @@ static void moveArrayDesc(Block *B, char *Src, char *Dst, Descriptor *D) { } } -static void ctorRecord(Block *B, char *Ptr, bool IsConst, bool IsMutable, - bool IsActive, Descriptor *D) { - 
const bool IsUnion = D->ElemRecord->isUnion(); - auto CtorSub = [=](unsigned SubOff, Descriptor *F, bool IsBase) { - auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + SubOff) - 1; - Desc->Offset = SubOff; - Desc->Desc = F; - Desc->IsInitialized = (B->isStatic() || F->IsArray) && !IsBase; - Desc->IsBase = IsBase; - Desc->IsActive = IsActive && !IsUnion; - Desc->IsConst = IsConst || F->IsConst; - Desc->IsMutable = IsMutable || F->IsMutable; - if (auto Fn = F->CtorFn) - Fn(B, Ptr + SubOff, Desc->IsConst, Desc->IsMutable, Desc->IsActive, F); - }; - for (const auto &B : D->ElemRecord->bases()) - CtorSub(B.Offset, B.Desc, /*isBase=*/true); +static void initField(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable, + bool IsActive, bool IsUnion, const Descriptor *D, + unsigned FieldOffset) { + auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + FieldOffset) - 1; + Desc->Offset = FieldOffset; + Desc->Desc = D; + Desc->IsInitialized = D->IsArray; + Desc->IsBase = false; + Desc->IsActive = IsActive && !IsUnion; + Desc->IsConst = IsConst || D->IsConst; + Desc->IsFieldMutable = IsMutable || D->IsMutable; + + if (auto Fn = D->CtorFn) + Fn(B, Ptr + FieldOffset, Desc->IsConst, Desc->IsFieldMutable, + Desc->IsActive, D); +} + +static void initBase(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable, + bool IsActive, const Descriptor *D, unsigned FieldOffset, + bool IsVirtualBase) { + assert(D); + assert(D->ElemRecord); + + bool IsUnion = D->ElemRecord->isUnion(); + auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + FieldOffset) - 1; + Desc->Offset = FieldOffset; + Desc->Desc = D; + Desc->IsInitialized = D->IsArray; + Desc->IsBase = true; + Desc->IsVirtualBase = IsVirtualBase; + Desc->IsActive = IsActive && !IsUnion; + Desc->IsConst = IsConst || D->IsConst; + Desc->IsFieldMutable = IsMutable || D->IsMutable; + + for (const auto &V : D->ElemRecord->bases()) + initBase(B, Ptr + FieldOffset, IsConst, IsMutable, IsActive, V.Desc, + V.Offset, false); for (const auto &F : D->ElemRecord->fields()) - CtorSub(F.Offset, F.Desc, /*isBase=*/false); + initField(B, Ptr + FieldOffset, IsConst, IsMutable, IsActive, IsUnion, + F.Desc, F.Offset); +} + +static void ctorRecord(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable, + bool IsActive, const Descriptor *D) { + for (const auto &V : D->ElemRecord->bases()) + initBase(B, Ptr, IsConst, IsMutable, IsActive, V.Desc, V.Offset, false); + for (const auto &F : D->ElemRecord->fields()) + initField(B, Ptr, IsConst, IsMutable, IsActive, D->ElemRecord->isUnion(), F.Desc, F.Offset); for (const auto &V : D->ElemRecord->virtual_bases()) - CtorSub(V.Offset, V.Desc, /*isBase=*/true); + initBase(B, Ptr, IsConst, IsMutable, IsActive, V.Desc, V.Offset, true); +} + +static void destroyField(Block *B, std::byte *Ptr, const Descriptor *D, + unsigned FieldOffset) { + if (auto Fn = D->DtorFn) + Fn(B, Ptr + FieldOffset, D); +} + +static void destroyBase(Block *B, std::byte *Ptr, const Descriptor *D, + unsigned FieldOffset) { + assert(D); + assert(D->ElemRecord); + + for (const auto &V : D->ElemRecord->bases()) + destroyBase(B, Ptr + FieldOffset, V.Desc, V.Offset); + for (const auto &F : D->ElemRecord->fields()) + destroyField(B, Ptr + FieldOffset, F.Desc, F.Offset); } -static void dtorRecord(Block *B, char *Ptr, Descriptor *D) { - auto DtorSub = [=](unsigned SubOff, Descriptor *F) { - if (auto Fn = F->DtorFn) - Fn(B, Ptr + SubOff, F); - }; +static void dtorRecord(Block *B, std::byte *Ptr, const Descriptor *D) { for (const auto &F : D->ElemRecord->bases()) - 
DtorSub(F.Offset, F.Desc); + destroyBase(B, Ptr, F.Desc, F.Offset); for (const auto &F : D->ElemRecord->fields()) - DtorSub(F.Offset, F.Desc); + destroyField(B, Ptr, F.Desc, F.Offset); for (const auto &F : D->ElemRecord->virtual_bases()) - DtorSub(F.Offset, F.Desc); + destroyBase(B, Ptr, F.Desc, F.Offset); } -static void moveRecord(Block *B, char *Src, char *Dst, Descriptor *D) { +static void moveRecord(Block *B, const std::byte *Src, std::byte *Dst, + const Descriptor *D) { for (const auto &F : D->ElemRecord->fields()) { auto FieldOff = F.Offset; - auto FieldDesc = F.Desc; + auto *FieldDesc = F.Desc; - *(reinterpret_cast<Descriptor **>(Dst + FieldOff) - 1) = FieldDesc; if (auto Fn = FieldDesc->MoveFn) Fn(B, Src + FieldOff, Dst + FieldOff, FieldDesc); } } static BlockCtorFn getCtorPrim(PrimType Type) { + // Floating types are special. They are primitives, but need their + // constructor called. + if (Type == PT_Float) + return ctorTy<PrimConv<PT_Float>::T>; + if (Type == PT_IntAP) + return ctorTy<PrimConv<PT_IntAP>::T>; + if (Type == PT_IntAPS) + return ctorTy<PrimConv<PT_IntAPS>::T>; + if (Type == PT_MemberPtr) + return ctorTy<PrimConv<PT_MemberPtr>::T>; + COMPOSITE_TYPE_SWITCH(Type, return ctorTy<T>, return nullptr); } static BlockDtorFn getDtorPrim(PrimType Type) { + // Floating types are special. They are primitives, but need their + // destructor called, since they might allocate memory. + if (Type == PT_Float) + return dtorTy<PrimConv<PT_Float>::T>; + if (Type == PT_IntAP) + return dtorTy<PrimConv<PT_IntAP>::T>; + if (Type == PT_IntAPS) + return dtorTy<PrimConv<PT_IntAPS>::T>; + if (Type == PT_MemberPtr) + return dtorTy<PrimConv<PT_MemberPtr>::T>; + COMPOSITE_TYPE_SWITCH(Type, return dtorTy<T>, return nullptr); } @@ -174,82 +268,123 @@ static BlockMoveFn getMovePrim(PrimType Type) { } static BlockCtorFn getCtorArrayPrim(PrimType Type) { - COMPOSITE_TYPE_SWITCH(Type, return ctorArrayTy<T>, return nullptr); + TYPE_SWITCH(Type, return ctorArrayTy<T>); + llvm_unreachable("unknown Expr"); } static BlockDtorFn getDtorArrayPrim(PrimType Type) { - COMPOSITE_TYPE_SWITCH(Type, return dtorArrayTy<T>, return nullptr); + TYPE_SWITCH(Type, return dtorArrayTy<T>); + llvm_unreachable("unknown Expr"); } static BlockMoveFn getMoveArrayPrim(PrimType Type) { - COMPOSITE_TYPE_SWITCH(Type, return moveArrayTy<T>, return nullptr); + TYPE_SWITCH(Type, return moveArrayTy<T>); + llvm_unreachable("unknown Expr"); } -Descriptor::Descriptor(const DeclTy &D, PrimType Type, bool IsConst, - bool IsTemporary, bool IsMutable) - : Source(D), ElemSize(primSize(Type)), Size(ElemSize), AllocSize(Size), +/// Primitives. +Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD, + bool IsConst, bool IsTemporary, bool IsMutable) + : Source(D), ElemSize(primSize(Type)), Size(ElemSize), + MDSize(MD.value_or(0)), AllocSize(align(Size + MDSize)), PrimT(Type), IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary), CtorFn(getCtorPrim(Type)), DtorFn(getDtorPrim(Type)), MoveFn(getMovePrim(Type)) { + assert(AllocSize >= Size); assert(Source && "Missing source"); } -Descriptor::Descriptor(const DeclTy &D, PrimType Type, size_t NumElems, - bool IsConst, bool IsTemporary, bool IsMutable) +/// Primitive arrays. 
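The reworked Descriptor constructors fold an optional metadata prefix into the allocation: Size still describes only the payload, while AllocSize reserves MDSize extra bytes and rounds the total up to the interpreter's alignment. A toy version of that size computation; the round-up-to-8 rule is an assumption made for the example, not necessarily what interp::align() does.

#include <cassert>
#include <cstddef>

// Example alignment rule; assumed for illustration only.
constexpr std::size_t align(std::size_t Size) { return (Size + 7) / 8 * 8; }

struct DescSizes {
  std::size_t Size;      // Bytes visible to the program (the data itself).
  std::size_t MDSize;    // Metadata stored in front of the data.
  std::size_t AllocSize; // Bytes actually allocated for the block.
};

constexpr DescSizes makePrimitive(std::size_t ElemSize, std::size_t MDSize) {
  return {ElemSize, MDSize, align(ElemSize + MDSize)};
}

int main() {
  // A 4-byte primitive with a 16-byte inline descriptor in front of it.
  constexpr DescSizes D = makePrimitive(4, 16);
  static_assert(D.Size == 4);
  static_assert(D.AllocSize == 24); // 20 rounded up to the next multiple of 8.
  assert(D.AllocSize >= D.Size + D.MDSize);
  return 0;
}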
+Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD, + size_t NumElems, bool IsConst, bool IsTemporary, + bool IsMutable) : Source(D), ElemSize(primSize(Type)), Size(ElemSize * NumElems), - AllocSize(align(Size) + sizeof(InitMap *)), IsConst(IsConst), - IsMutable(IsMutable), IsTemporary(IsTemporary), IsArray(true), - CtorFn(getCtorArrayPrim(Type)), DtorFn(getDtorArrayPrim(Type)), - MoveFn(getMoveArrayPrim(Type)) { + MDSize(MD.value_or(0)), + AllocSize(align(MDSize) + align(Size) + sizeof(InitMapPtr)), PrimT(Type), + IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary), + IsArray(true), CtorFn(getCtorArrayPrim(Type)), + DtorFn(getDtorArrayPrim(Type)), MoveFn(getMoveArrayPrim(Type)) { assert(Source && "Missing source"); + assert(NumElems <= (MaxArrayElemBytes / ElemSize)); } -Descriptor::Descriptor(const DeclTy &D, PrimType Type, bool IsTemporary, - UnknownSize) +/// Primitive unknown-size arrays. +Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD, + bool IsTemporary, UnknownSize) : Source(D), ElemSize(primSize(Type)), Size(UnknownSizeMark), - AllocSize(alignof(void *)), IsConst(true), IsMutable(false), - IsTemporary(IsTemporary), IsArray(true), CtorFn(getCtorArrayPrim(Type)), - DtorFn(getDtorArrayPrim(Type)), MoveFn(getMoveArrayPrim(Type)) { + MDSize(MD.value_or(0)), + AllocSize(MDSize + sizeof(InitMapPtr) + alignof(void *)), IsConst(true), + IsMutable(false), IsTemporary(IsTemporary), IsArray(true), + CtorFn(getCtorArrayPrim(Type)), DtorFn(getDtorArrayPrim(Type)), + MoveFn(getMoveArrayPrim(Type)) { assert(Source && "Missing source"); } -Descriptor::Descriptor(const DeclTy &D, Descriptor *Elem, unsigned NumElems, - bool IsConst, bool IsTemporary, bool IsMutable) +/// Arrays of composite elements. +Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD, + unsigned NumElems, bool IsConst, bool IsTemporary, + bool IsMutable) : Source(D), ElemSize(Elem->getAllocSize() + sizeof(InlineDescriptor)), - Size(ElemSize * NumElems), - AllocSize(std::max<size_t>(alignof(void *), Size)), ElemDesc(Elem), - IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary), - IsArray(true), CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc), - MoveFn(moveArrayDesc) { + Size(ElemSize * NumElems), MDSize(MD.value_or(0)), + AllocSize(std::max<size_t>(alignof(void *), Size) + MDSize), + ElemDesc(Elem), IsConst(IsConst), IsMutable(IsMutable), + IsTemporary(IsTemporary), IsArray(true), CtorFn(ctorArrayDesc), + DtorFn(dtorArrayDesc), MoveFn(moveArrayDesc) { assert(Source && "Missing source"); } -Descriptor::Descriptor(const DeclTy &D, Descriptor *Elem, bool IsTemporary, - UnknownSize) +/// Unknown-size arrays of composite elements. +Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD, + bool IsTemporary, UnknownSize) : Source(D), ElemSize(Elem->getAllocSize() + sizeof(InlineDescriptor)), - Size(UnknownSizeMark), AllocSize(alignof(void *)), ElemDesc(Elem), - IsConst(true), IsMutable(false), IsTemporary(IsTemporary), IsArray(true), + Size(UnknownSizeMark), MDSize(MD.value_or(0)), + AllocSize(MDSize + alignof(void *)), ElemDesc(Elem), IsConst(true), + IsMutable(false), IsTemporary(IsTemporary), IsArray(true), CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc), MoveFn(moveArrayDesc) { assert(Source && "Missing source"); } -Descriptor::Descriptor(const DeclTy &D, Record *R, bool IsConst, - bool IsTemporary, bool IsMutable) +/// Composite records. 
+Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD, + bool IsConst, bool IsTemporary, bool IsMutable) : Source(D), ElemSize(std::max<size_t>(alignof(void *), R->getFullSize())), - Size(ElemSize), AllocSize(Size), ElemRecord(R), IsConst(IsConst), - IsMutable(IsMutable), IsTemporary(IsTemporary), CtorFn(ctorRecord), - DtorFn(dtorRecord), MoveFn(moveRecord) { + Size(ElemSize), MDSize(MD.value_or(0)), AllocSize(Size + MDSize), + ElemRecord(R), IsConst(IsConst), IsMutable(IsMutable), + IsTemporary(IsTemporary), CtorFn(ctorRecord), DtorFn(dtorRecord), + MoveFn(moveRecord) { + assert(Source && "Missing source"); +} + +/// Dummy. +Descriptor::Descriptor(const DeclTy &D) + : Source(D), ElemSize(1), Size(1), MDSize(0), AllocSize(MDSize), + ElemRecord(nullptr), IsConst(true), IsMutable(false), IsTemporary(false), + IsDummy(true) { assert(Source && "Missing source"); } QualType Descriptor::getType() const { - if (auto *E = asExpr()) + if (const auto *E = asExpr()) return E->getType(); - if (auto *D = asValueDecl()) + if (const auto *D = asValueDecl()) return D->getType(); + if (const auto *T = dyn_cast<TypeDecl>(asDecl())) + return QualType(T->getTypeForDecl(), 0); llvm_unreachable("Invalid descriptor type"); } +QualType Descriptor::getElemQualType() const { + assert(isArray()); + QualType T = getType(); + if (const auto *AT = T->getAsArrayTypeUnsafe()) + return AT->getElementType(); + if (const auto *CT = T->getAs<ComplexType>()) + return CT->getElementType(); + if (const auto *CT = T->getAs<VectorType>()) + return CT->getElementType(); + llvm_unreachable("Array that's not an array/complex/vector type?"); +} + SourceLocation Descriptor::getLocation() const { if (auto *D = Source.dyn_cast<const Decl *>()) return D->getLocation(); @@ -258,20 +393,14 @@ SourceLocation Descriptor::getLocation() const { llvm_unreachable("Invalid descriptor type"); } -InitMap::InitMap(unsigned N) : UninitFields(N) { - for (unsigned I = 0; I < N / PER_FIELD; ++I) { - data()[I] = 0; - } +InitMap::InitMap(unsigned N) + : UninitFields(N), Data(std::make_unique<T[]>(numFields(N))) { + std::fill_n(data(), numFields(N), 0); } -InitMap::T *InitMap::data() { - auto *Start = reinterpret_cast<char *>(this) + align(sizeof(InitMap)); - return reinterpret_cast<T *>(Start); -} - -bool InitMap::initialize(unsigned I) { +bool InitMap::initializeElement(unsigned I) { unsigned Bucket = I / PER_FIELD; - unsigned Mask = 1ull << static_cast<uint64_t>(I % PER_FIELD); + T Mask = T(1) << (I % PER_FIELD); if (!(data()[Bucket] & Mask)) { data()[Bucket] |= Mask; UninitFields -= 1; @@ -279,14 +408,7 @@ bool InitMap::initialize(unsigned I) { return UninitFields == 0; } -bool InitMap::isInitialized(unsigned I) { +bool InitMap::isElementInitialized(unsigned I) const { unsigned Bucket = I / PER_FIELD; - unsigned Mask = 1ull << static_cast<uint64_t>(I % PER_FIELD); - return data()[Bucket] & Mask; -} - -InitMap *InitMap::allocate(unsigned N) { - const size_t NumFields = ((N + PER_FIELD - 1) / PER_FIELD); - const size_t Size = align(sizeof(InitMap)) + NumFields * PER_FIELD; - return new (malloc(Size)) InitMap(N); + return data()[Bucket] & (T(1) << (I % PER_FIELD)); } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h index b260b7600974..0cc5d77c407e 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_AST_INTERP_DESCRIPTOR_H #define 
LLVM_CLANG_AST_INTERP_DESCRIPTOR_H +#include "PrimType.h" #include "clang/AST/Decl.h" #include "clang/AST/Expr.h" @@ -20,44 +21,101 @@ namespace clang { namespace interp { class Block; class Record; +struct InitMap; struct Descriptor; enum PrimType : unsigned; using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>; +using InitMapPtr = std::optional<std::pair<bool, std::shared_ptr<InitMap>>>; /// Invoked whenever a block is created. The constructor method fills in the /// inline descriptors of all fields and array elements. It also initializes /// all the fields which contain non-trivial types. -using BlockCtorFn = void (*)(Block *Storage, char *FieldPtr, bool IsConst, +using BlockCtorFn = void (*)(Block *Storage, std::byte *FieldPtr, bool IsConst, bool IsMutable, bool IsActive, - Descriptor *FieldDesc); + const Descriptor *FieldDesc); /// Invoked when a block is destroyed. Invokes the destructors of all /// non-trivial nested fields of arrays and records. -using BlockDtorFn = void (*)(Block *Storage, char *FieldPtr, - Descriptor *FieldDesc); +using BlockDtorFn = void (*)(Block *Storage, std::byte *FieldPtr, + const Descriptor *FieldDesc); /// Invoked when a block with pointers referencing it goes out of scope. Such /// blocks are persisted: the move function copies all inline descriptors and /// non-trivial fields, as existing pointers might need to reference those /// descriptors. Data is not copied since it cannot be legally read. -using BlockMoveFn = void (*)(Block *Storage, char *SrcFieldPtr, - char *DstFieldPtr, Descriptor *FieldDesc); +using BlockMoveFn = void (*)(Block *Storage, const std::byte *SrcFieldPtr, + std::byte *DstFieldPtr, + const Descriptor *FieldDesc); + +enum class GlobalInitState { + Initialized, + NoInitializer, + InitializerFailed, +}; -/// Object size as used by the interpreter. -using InterpSize = unsigned; +/// Descriptor used for global variables. +struct alignas(void *) GlobalInlineDescriptor { + GlobalInitState InitState = GlobalInitState::InitializerFailed; +}; +static_assert(sizeof(GlobalInlineDescriptor) == sizeof(void *), ""); + +/// Inline descriptor embedded in structures and arrays. +/// +/// Such descriptors precede all composite array elements and structure fields. +/// If the base of a pointer is not zero, the base points to the end of this +/// structure. The offset field is used to traverse the pointer chain up +/// to the root structure which allocated the object. +struct InlineDescriptor { + /// Offset inside the structure/array. + unsigned Offset; + + /// Flag indicating if the storage is constant or not. + /// Relevant for primitive fields. + LLVM_PREFERRED_TYPE(bool) + unsigned IsConst : 1; + /// For primitive fields, it indicates if the field was initialized. + /// Primitive fields in static storage are always initialized. + /// Arrays are always initialized, even though their elements might not be. + /// Base classes are initialized after the constructor is invoked. + LLVM_PREFERRED_TYPE(bool) + unsigned IsInitialized : 1; + /// Flag indicating if the field is an embedded base class. + LLVM_PREFERRED_TYPE(bool) + unsigned IsBase : 1; + LLVM_PREFERRED_TYPE(bool) + unsigned IsVirtualBase : 1; + /// Flag indicating if the field is the active member of a union. + LLVM_PREFERRED_TYPE(bool) + unsigned IsActive : 1; + /// Flag indicating if the field is mutable (if in a record). 
+ LLVM_PREFERRED_TYPE(bool) + unsigned IsFieldMutable : 1; + + const Descriptor *Desc; + + InlineDescriptor(const Descriptor *D) + : Offset(sizeof(InlineDescriptor)), IsConst(false), IsInitialized(false), + IsBase(false), IsActive(false), IsFieldMutable(false), Desc(D) {} + + void dump() const { dump(llvm::errs()); } + void dump(llvm::raw_ostream &OS) const; +}; +static_assert(sizeof(GlobalInlineDescriptor) != sizeof(InlineDescriptor), ""); /// Describes a memory block created by an allocation site. -struct Descriptor { +struct Descriptor final { private: /// Original declaration, used to emit the error message. const DeclTy Source; /// Size of an element, in host bytes. - const InterpSize ElemSize; + const unsigned ElemSize; /// Size of the storage, in host bytes. - const InterpSize Size; + const unsigned Size; + /// Size of the metadata. + const unsigned MDSize; /// Size of the allocation (storage + metadata), in host bytes. - const InterpSize AllocSize; + const unsigned AllocSize; /// Value to denote arrays of unknown size. static constexpr unsigned UnknownSizeMark = (unsigned)-1; @@ -66,10 +124,23 @@ public: /// Token to denote structures of unknown size. struct UnknownSize {}; + using MetadataSize = std::optional<unsigned>; + static constexpr MetadataSize InlineDescMD = sizeof(InlineDescriptor); + static constexpr MetadataSize GlobalMD = sizeof(GlobalInlineDescriptor); + + /// Maximum number of bytes to be used for array elements. + static constexpr unsigned MaxArrayElemBytes = + std::numeric_limits<decltype(AllocSize)>::max() - sizeof(InitMapPtr) - + align(std::max(*InlineDescMD, *GlobalMD)); + /// Pointer to the record, if block contains records. - Record *const ElemRecord = nullptr; + const Record *const ElemRecord = nullptr; /// Descriptor of the array element. - Descriptor *const ElemDesc = nullptr; + const Descriptor *const ElemDesc = nullptr; + /// The primitive type this descriptor was created for, + /// or the primitive element type in case this is + /// a primitive array. + const std::optional<PrimType> PrimT = std::nullopt; /// Flag indicating if the block is mutable. const bool IsConst = false; /// Flag indicating if a field is mutable. @@ -78,6 +149,8 @@ public: const bool IsTemporary = false; /// Flag indicating if the block is an array. const bool IsArray = false; + /// Flag indicating if this is a dummy descriptor. + bool IsDummy = false; /// Storage management methods. const BlockCtorFn CtorFn = nullptr; @@ -85,43 +158,57 @@ public: const BlockMoveFn MoveFn = nullptr; /// Allocates a descriptor for a primitive. - Descriptor(const DeclTy &D, PrimType Type, bool IsConst, bool IsTemporary, - bool IsMutable); + Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD, bool IsConst, + bool IsTemporary, bool IsMutable); /// Allocates a descriptor for an array of primitives. - Descriptor(const DeclTy &D, PrimType Type, size_t NumElems, bool IsConst, - bool IsTemporary, bool IsMutable); + Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD, size_t NumElems, + bool IsConst, bool IsTemporary, bool IsMutable); /// Allocates a descriptor for an array of primitives of unknown size. - Descriptor(const DeclTy &D, PrimType Type, bool IsTemporary, UnknownSize); + Descriptor(const DeclTy &D, PrimType Type, MetadataSize MDSize, + bool IsTemporary, UnknownSize); /// Allocates a descriptor for an array of composites. 
- Descriptor(const DeclTy &D, Descriptor *Elem, unsigned NumElems, bool IsConst, - bool IsTemporary, bool IsMutable); + Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD, + unsigned NumElems, bool IsConst, bool IsTemporary, bool IsMutable); /// Allocates a descriptor for an array of composites of unknown size. - Descriptor(const DeclTy &D, Descriptor *Elem, bool IsTemporary, UnknownSize); + Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD, + bool IsTemporary, UnknownSize); /// Allocates a descriptor for a record. - Descriptor(const DeclTy &D, Record *R, bool IsConst, bool IsTemporary, - bool IsMutable); + Descriptor(const DeclTy &D, const Record *R, MetadataSize MD, bool IsConst, + bool IsTemporary, bool IsMutable); + + /// Allocates a dummy descriptor. + Descriptor(const DeclTy &D); + + /// Make this descriptor a dummy descriptor. + void makeDummy() { IsDummy = true; } QualType getType() const; + QualType getElemQualType() const; SourceLocation getLocation() const; const Decl *asDecl() const { return Source.dyn_cast<const Decl *>(); } const Expr *asExpr() const { return Source.dyn_cast<const Expr *>(); } + const DeclTy &getSource() const { return Source; } const ValueDecl *asValueDecl() const { - return dyn_cast_or_null<ValueDecl>(asDecl()); + return dyn_cast_if_present<ValueDecl>(asDecl()); + } + + const VarDecl *asVarDecl() const { + return dyn_cast_if_present<VarDecl>(asDecl()); } const FieldDecl *asFieldDecl() const { - return dyn_cast_or_null<FieldDecl>(asDecl()); + return dyn_cast_if_present<FieldDecl>(asDecl()); } const RecordDecl *asRecordDecl() const { - return dyn_cast_or_null<RecordDecl>(asDecl()); + return dyn_cast_if_present<RecordDecl>(asDecl()); } /// Returns the size of the object without metadata. @@ -130,10 +217,17 @@ public: return Size; } + PrimType getPrimType() const { + assert(isPrimitiveArray() || isPrimitive()); + return *PrimT; + } + /// Returns the allocated size, including metadata. unsigned getAllocSize() const { return AllocSize; } /// returns the size of an element when the structure is viewed as an array. unsigned getElemSize() const { return ElemSize; } + /// Returns the size of the metadata. + unsigned getMetadataSize() const { return MDSize; } /// Returns the number of elements stored in the block. unsigned getNumElems() const { @@ -142,6 +236,8 @@ public: /// Checks if the descriptor is of an array of primitives. bool isPrimitiveArray() const { return IsArray && !ElemDesc; } + /// Checks if the descriptor is of an array of composites. + bool isCompositeArray() const { return IsArray && ElemDesc; } /// Checks if the descriptor is of an array of zero size. bool isZeroSizeArray() const { return Size == 0; } /// Checks if the descriptor is of an array of unknown size. @@ -152,66 +248,46 @@ public: /// Checks if the descriptor is of an array. bool isArray() const { return IsArray; } -}; + /// Checks if the descriptor is of a record. + bool isRecord() const { return !IsArray && ElemRecord; } + /// Checks if this is a dummy descriptor. + bool isDummy() const { return IsDummy; } -/// Inline descriptor embedded in structures and arrays. -/// -/// Such descriptors precede all composite array elements and structure fields. -/// If the base of a pointer is not zero, the base points to the end of this -/// structure. The offset field is used to traverse the pointer chain up -/// to the root structure which allocated the object. -struct InlineDescriptor { - /// Offset inside the structure/array. 
-  unsigned Offset;
-
-  /// Flag indicating if the storage is constant or not.
-  /// Relevant for primitive fields.
-  unsigned IsConst : 1;
-  /// For primitive fields, it indicates if the field was initialized.
-  /// Primitive fields in static storage are always initialized.
-  /// Arrays are always initialized, even though their elements might not be.
-  /// Base classes are initialized after the constructor is invoked.
-  unsigned IsInitialized : 1;
-  /// Flag indicating if the field is an embedded base class.
-  unsigned IsBase : 1;
-  /// Flag indicating if the field is the active member of a union.
-  unsigned IsActive : 1;
-  /// Flag indicating if the field is mutable (if in a record).
-  unsigned IsMutable : 1;
-
-  Descriptor *Desc;
+  void dump() const;
+  void dump(llvm::raw_ostream &OS) const;
 };
 
 /// Bitfield tracking the initialisation status of elements of primitive arrays.
-/// A pointer to this is embedded at the end of all primitive arrays.
-/// If the map was not yet created and nothing was initialied, the pointer to
-/// this structure is 0. If the object was fully initialized, the pointer is -1.
-struct InitMap {
+struct InitMap final {
 private:
   /// Type packing bits.
   using T = uint64_t;
   /// Bits stored in a single field.
   static constexpr uint64_t PER_FIELD = sizeof(T) * CHAR_BIT;
 
+public:
   /// Initializes the map with no fields set.
-  InitMap(unsigned N);
+  explicit InitMap(unsigned N);
+
+private:
+  friend class Pointer;
 
   /// Returns a pointer to storage.
-  T *data();
+  T *data() { return Data.get(); }
+  const T *data() const { return Data.get(); }
 
-public:
   /// Initializes an element. Returns true when the object is fully initialized.
-  bool initialize(unsigned I);
+  bool initializeElement(unsigned I);
 
   /// Checks if an element was initialized.
-  bool isInitialized(unsigned I);
-
-  /// Allocates a map holding N elements.
-  static InitMap *allocate(unsigned N);
+  bool isElementInitialized(unsigned I) const;
 
-private:
-  /// Number of fields initialized.
+  static constexpr size_t numFields(unsigned N) {
+    return (N + PER_FIELD - 1) / PER_FIELD;
+  }
+  /// Number of fields not initialized.
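InitMap now owns its bit storage through a unique_ptr rather than a trailing allocation: element I maps to bit I % 64 of word I / 64, and the uninitialized-field counter counts down until the whole array is known to be initialized. A simplified stand-in showing the same bookkeeping (not the real class):

#include <cassert>
#include <cstdint>
#include <memory>

class MiniInitMap {
  using T = uint64_t;
  static constexpr unsigned PerField = sizeof(T) * 8;

  unsigned Uninit;           // Elements still uninitialized.
  std::unique_ptr<T[]> Data; // One bit per element, zero-initialized.

public:
  explicit MiniInitMap(unsigned N)
      : Uninit(N), Data(std::make_unique<T[]>((N + PerField - 1) / PerField)) {}

  // Marks element I initialized; returns true once *all* elements are.
  bool initializeElement(unsigned I) {
    T Mask = T(1) << (I % PerField);
    if (!(Data[I / PerField] & Mask)) {
      Data[I / PerField] |= Mask;
      --Uninit;
    }
    return Uninit == 0;
  }

  bool isElementInitialized(unsigned I) const {
    return Data[I / PerField] & (T(1) << (I % PerField));
  }
};

int main() {
  MiniInitMap M(70);                // Spans two 64-bit words.
  assert(!M.isElementInitialized(65));
  assert(!M.initializeElement(65)); // Not everything is initialized yet.
  assert(M.isElementInitialized(65));
  for (unsigned I = 0; I != 70; ++I)
    M.initializeElement(I);
  assert(M.initializeElement(0));   // Already-set bits don't over-count.
  return 0;
}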
unsigned UninitFields; + std::unique_ptr<T[]> Data; }; } // namespace interp diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp index c1c18f832d4f..867284ecf7f4 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp @@ -10,40 +10,74 @@ // //===----------------------------------------------------------------------===// +#include "Boolean.h" +#include "Context.h" +#include "EvaluationResult.h" +#include "Floating.h" #include "Function.h" +#include "FunctionPointer.h" +#include "Integral.h" +#include "IntegralAP.h" +#include "InterpFrame.h" +#include "MemberPointer.h" #include "Opcode.h" #include "PrimType.h" #include "Program.h" +#include "clang/AST/ASTDumperUtils.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/ExprCXX.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Format.h" using namespace clang; using namespace clang::interp; +template <typename T> inline T ReadArg(Program &P, CodePtr &OpPC) { + if constexpr (std::is_pointer_v<T>) { + uint32_t ID = OpPC.read<uint32_t>(); + return reinterpret_cast<T>(P.getNativePointer(ID)); + } else { + return OpPC.read<T>(); + } +} + +template <> inline Floating ReadArg<Floating>(Program &P, CodePtr &OpPC) { + Floating F = Floating::deserialize(*OpPC); + OpPC += align(F.bytesToSerialize()); + return F; +} + +template <> +inline IntegralAP<false> ReadArg<IntegralAP<false>>(Program &P, CodePtr &OpPC) { + IntegralAP<false> I = IntegralAP<false>::deserialize(*OpPC); + OpPC += align(I.bytesToSerialize()); + return I; +} + +template <> +inline IntegralAP<true> ReadArg<IntegralAP<true>>(Program &P, CodePtr &OpPC) { + IntegralAP<true> I = IntegralAP<true>::deserialize(*OpPC); + OpPC += align(I.bytesToSerialize()); + return I; +} + LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); } LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const { - if (F) { - if (auto *Cons = dyn_cast<CXXConstructorDecl>(F)) { - DeclarationName Name = Cons->getParent()->getDeclName(); - OS << Name << "::" << Name << ":\n"; - } else { - OS << F->getDeclName() << ":\n"; - } - } else { - OS << "<<expr>>\n"; + { + ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_GREEN, true}); + OS << getName() << " " << (const void *)this << "\n"; } - OS << "frame size: " << getFrameSize() << "\n"; OS << "arg size: " << getArgSize() << "\n"; OS << "rvo: " << hasRVO() << "\n"; + OS << "this arg: " << hasThisPointer() << "\n"; auto PrintName = [&OS](const char *Name) { OS << Name; - for (long I = 0, N = strlen(Name); I < 30 - N; ++I) { - OS << ' '; - } + long N = 30 - strlen(Name); + if (N > 0) + OS.indent(N); }; for (CodePtr Start = getCodeBegin(), PC = Start; PC != getCodeEnd();) { @@ -60,11 +94,289 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const { LLVM_DUMP_METHOD void Program::dump() const { dump(llvm::errs()); } +static const char *primTypeToString(PrimType T) { + switch (T) { + case PT_Sint8: + return "Sint8"; + case PT_Uint8: + return "Uint8"; + case PT_Sint16: + return "Sint16"; + case PT_Uint16: + return "Uint16"; + case PT_Sint32: + return "Sint32"; + case PT_Uint32: + return "Uint32"; + case PT_Sint64: + return "Sint64"; + case PT_Uint64: + return "Uint64"; + case PT_IntAP: + return "IntAP"; + case PT_IntAPS: + return "IntAPS"; + case PT_Bool: + return "Bool"; + case PT_Float: + return "Float"; + case PT_Ptr: + return "Ptr"; + case PT_FnPtr: + return "FnPtr"; + case PT_MemberPtr: + return 
"MemberPtr"; + } + llvm_unreachable("Unhandled PrimType"); +} + LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const { - for (auto &Func : Funcs) { + { + ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_RED, true}); + OS << "\n:: Program\n"; + } + + { + ColorScope SC(OS, true, {llvm::raw_ostream::WHITE, true}); + OS << "Total memory : " << Allocator.getTotalMemory() << " bytes\n"; + OS << "Global Variables: " << Globals.size() << "\n"; + } + unsigned GI = 0; + for (const Global *G : Globals) { + const Descriptor *Desc = G->block()->getDescriptor(); + Pointer GP = getPtrGlobal(GI); + + OS << GI << ": " << (const void *)G->block() << " "; + { + ColorScope SC(OS, true, + GP.isInitialized() + ? TerminalColor{llvm::raw_ostream::GREEN, false} + : TerminalColor{llvm::raw_ostream::RED, false}); + OS << (GP.isInitialized() ? "initialized " : "uninitialized "); + } + Desc->dump(OS); + + if (GP.isInitialized() && Desc->IsTemporary) { + if (const auto *MTE = + dyn_cast_if_present<MaterializeTemporaryExpr>(Desc->asExpr()); + MTE && MTE->getLifetimeExtendedTemporaryDecl()) { + if (const APValue *V = + MTE->getLifetimeExtendedTemporaryDecl()->getValue()) { + OS << " (global temporary value: "; + { + ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_MAGENTA, true}); + std::string VStr; + llvm::raw_string_ostream SS(VStr); + V->dump(SS, Ctx.getASTContext()); + + for (unsigned I = 0; I != VStr.size(); ++I) { + if (VStr[I] == '\n') + VStr[I] = ' '; + } + VStr.pop_back(); // Remove the newline (or now space) at the end. + OS << VStr; + } + OS << ')'; + } + } + } + + OS << "\n"; + if (GP.isInitialized() && Desc->isPrimitive() && !Desc->isDummy()) { + OS << " "; + { + ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_CYAN, false}); + OS << primTypeToString(Desc->getPrimType()) << " "; + } + TYPE_SWITCH(Desc->getPrimType(), { GP.deref<T>().print(OS); }); + OS << "\n"; + } + ++GI; + } + + { + ColorScope SC(OS, true, {llvm::raw_ostream::WHITE, true}); + OS << "Functions: " << Funcs.size() << "\n"; + } + for (const auto &Func : Funcs) { Func.second->dump(); } - for (auto &Anon : AnonFuncs) { + for (const auto &Anon : AnonFuncs) { Anon->dump(); } } + +LLVM_DUMP_METHOD void Descriptor::dump() const { + dump(llvm::errs()); + llvm::errs() << '\n'; +} + +LLVM_DUMP_METHOD void Descriptor::dump(llvm::raw_ostream &OS) const { + // Source + { + ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true}); + if (const auto *ND = dyn_cast_if_present<NamedDecl>(asDecl())) + ND->printQualifiedName(OS); + else if (asExpr()) + OS << "Expr " << (const void *)asExpr(); + } + + // Print a few interesting bits about the descriptor. 
+ if (isPrimitiveArray()) + OS << " primitive-array"; + else if (isCompositeArray()) + OS << " composite-array"; + else if (isRecord()) + OS << " record"; + else if (isPrimitive()) + OS << " primitive"; + + if (isZeroSizeArray()) + OS << " zero-size-array"; + else if (isUnknownSizeArray()) + OS << " unknown-size-array"; + + if (isDummy()) + OS << " dummy"; +} + +LLVM_DUMP_METHOD void InlineDescriptor::dump(llvm::raw_ostream &OS) const { + { + ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true}); + OS << "InlineDescriptor " << (const void *)this << "\n"; + } + OS << "Offset: " << Offset << "\n"; + OS << "IsConst: " << IsConst << "\n"; + OS << "IsInitialized: " << IsInitialized << "\n"; + OS << "IsBase: " << IsBase << "\n"; + OS << "IsActive: " << IsActive << "\n"; + OS << "IsFieldMutable: " << IsFieldMutable << "\n"; + OS << "Desc: "; + if (Desc) + Desc->dump(OS); + else + OS << "nullptr"; + OS << "\n"; +} + +LLVM_DUMP_METHOD void InterpFrame::dump(llvm::raw_ostream &OS, + unsigned Indent) const { + unsigned Spaces = Indent * 2; + { + ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true}); + OS.indent(Spaces); + if (getCallee()) + describe(OS); + else + OS << "Frame (Depth: " << getDepth() << ")"; + OS << "\n"; + } + OS.indent(Spaces) << "Function: " << getFunction(); + if (const Function *F = getFunction()) { + OS << " (" << F->getName() << ")"; + } + OS << "\n"; + OS.indent(Spaces) << "This: " << getThis() << "\n"; + OS.indent(Spaces) << "RVO: " << getRVOPtr() << "\n"; + + while (const InterpFrame *F = this->Caller) { + F->dump(OS, Indent + 1); + F = F->Caller; + } +} + +LLVM_DUMP_METHOD void Record::dump(llvm::raw_ostream &OS, unsigned Indentation, + unsigned Offset) const { + unsigned Indent = Indentation * 2; + OS.indent(Indent); + { + ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true}); + OS << getName() << "\n"; + } + + unsigned I = 0; + for (const Record::Base &B : bases()) { + OS.indent(Indent) << "- Base " << I << ". Offset " << (Offset + B.Offset) + << "\n"; + B.R->dump(OS, Indentation + 1, Offset + B.Offset); + ++I; + } + + I = 0; + for (const Record::Field &F : fields()) { + OS.indent(Indent) << "- Field " << I << ": "; + { + ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_RED, true}); + OS << F.Decl->getName(); + } + OS << ". Offset " << (Offset + F.Offset) << "\n"; + ++I; + } + + I = 0; + for (const Record::Base &B : virtual_bases()) { + OS.indent(Indent) << "- Virtual Base " << I << ". 
Offset "
+                      << (Offset + B.Offset) << "\n";
+    B.R->dump(OS, Indentation + 1, Offset + B.Offset);
+    ++I;
+  }
+}
+
+LLVM_DUMP_METHOD void Block::dump(llvm::raw_ostream &OS) const {
+  {
+    ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_BLUE, true});
+    OS << "Block " << (const void *)this;
+  }
+  OS << " (";
+  Desc->dump(OS);
+  OS << ")\n";
+  unsigned NPointers = 0;
+  for (const Pointer *P = Pointers; P; P = P->Next) {
+    ++NPointers;
+  }
+  OS << "  Pointers: " << NPointers << "\n";
+  OS << "  Dead: " << IsDead << "\n";
+  OS << "  Static: " << IsStatic << "\n";
+  OS << "  Extern: " << IsExtern << "\n";
+  OS << "  Initialized: " << IsInitialized << "\n";
+}
+
+LLVM_DUMP_METHOD void EvaluationResult::dump() const {
+  assert(Ctx);
+  auto &OS = llvm::errs();
+  const ASTContext &ASTCtx = Ctx->getASTContext();
+
+  switch (Kind) {
+  case Empty:
+    OS << "Empty\n";
+    break;
+  case RValue:
+    OS << "RValue: ";
+    std::get<APValue>(Value).dump(OS, ASTCtx);
+    break;
+  case LValue: {
+    assert(Source);
+    QualType SourceType;
+    if (const auto *D = Source.dyn_cast<const Decl *>()) {
+      if (const auto *VD = dyn_cast<ValueDecl>(D))
+        SourceType = VD->getType();
+    } else if (const auto *E = Source.dyn_cast<const Expr *>()) {
+      SourceType = E->getType();
+    }
+
+    OS << "LValue: ";
+    if (const auto *P = std::get_if<Pointer>(&Value))
+      P->toAPValue(ASTCtx).printPretty(OS, ASTCtx, SourceType);
+    else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
+      FP->toAPValue(ASTCtx).printPretty(OS, ASTCtx, SourceType);
+    OS << "\n";
+    break;
+  }
+  case Invalid:
+    OS << "Invalid\n";
+    break;
+  case Valid:
+    OS << "Valid\n";
+    break;
+  }
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.cpp b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.cpp
new file mode 100644
index 000000000000..a51599774078
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.cpp
@@ -0,0 +1,118 @@
+//==-------- DynamicAllocator.cpp - Dynamic allocations ----------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DynamicAllocator.h"
+#include "InterpBlock.h"
+#include "InterpState.h"
+
+using namespace clang;
+using namespace clang::interp;
+
+DynamicAllocator::~DynamicAllocator() { cleanup(); }
+
+void DynamicAllocator::cleanup() {
+  // Invoke destructors of all the blocks and, as a last resort,
+  // reset all the pointers pointing to them to null pointees.
+  // This should never show up in diagnostics, but it's necessary
+  // for us to not cause use-after-free problems.
+  for (auto &Iter : AllocationSites) {
+    auto &AllocSite = Iter.second;
+    for (auto &Alloc : AllocSite.Allocations) {
+      Block *B = reinterpret_cast<Block *>(Alloc.Memory.get());
+      B->invokeDtor();
+      if (B->hasPointers()) {
+        while (B->Pointers) {
+          Pointer *Next = B->Pointers->Next;
+          B->Pointers->PointeeStorage.BS.Pointee = nullptr;
+          B->Pointers = Next;
+        }
+        B->Pointers = nullptr;
+      }
+    }
+  }
+
+  AllocationSites.clear();
+}
+
+Block *DynamicAllocator::allocate(const Expr *Source, PrimType T,
+                                  size_t NumElements, unsigned EvalID) {
+  // Create a new descriptor for an array of the specified size and
+  // element type.
+ const Descriptor *D = allocateDescriptor( + Source, T, Descriptor::InlineDescMD, NumElements, /*IsConst=*/false, + /*IsTemporary=*/false, /*IsMutable=*/false); + + return allocate(D, EvalID); +} + +Block *DynamicAllocator::allocate(const Descriptor *ElementDesc, + size_t NumElements, unsigned EvalID) { + // Create a new descriptor for an array of the specified size and + // element type. + const Descriptor *D = allocateDescriptor( + ElementDesc->asExpr(), ElementDesc, Descriptor::InlineDescMD, NumElements, + /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false); + return allocate(D, EvalID); +} + +Block *DynamicAllocator::allocate(const Descriptor *D, unsigned EvalID) { + assert(D); + assert(D->asExpr()); + + auto Memory = + std::make_unique<std::byte[]>(sizeof(Block) + D->getAllocSize()); + auto *B = new (Memory.get()) Block(EvalID, D, /*isStatic=*/false); + B->invokeCtor(); + + InlineDescriptor *ID = reinterpret_cast<InlineDescriptor *>(B->rawData()); + ID->Desc = D; + ID->IsActive = true; + ID->Offset = sizeof(InlineDescriptor); + ID->IsBase = false; + ID->IsFieldMutable = false; + ID->IsConst = false; + ID->IsInitialized = false; + + B->IsDynamic = true; + + if (auto It = AllocationSites.find(D->asExpr()); It != AllocationSites.end()) + It->second.Allocations.emplace_back(std::move(Memory)); + else + AllocationSites.insert( + {D->asExpr(), AllocationSite(std::move(Memory), D->isArray())}); + return B; +} + +bool DynamicAllocator::deallocate(const Expr *Source, + const Block *BlockToDelete, InterpState &S) { + auto It = AllocationSites.find(Source); + if (It == AllocationSites.end()) + return false; + + auto &Site = It->second; + assert(Site.size() > 0); + + // Find the Block to delete. + auto AllocIt = llvm::find_if(Site.Allocations, [&](const Allocation &A) { + const Block *B = reinterpret_cast<const Block *>(A.Memory.get()); + return BlockToDelete == B; + }); + + assert(AllocIt != Site.Allocations.end()); + + Block *B = reinterpret_cast<Block *>(AllocIt->Memory.get()); + B->invokeDtor(); + + S.deallocate(B); + Site.Allocations.erase(AllocIt); + + if (Site.size() == 0) + AllocationSites.erase(It); + + return true; +} diff --git a/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.h b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.h new file mode 100644 index 000000000000..a84600aa54cc --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/DynamicAllocator.h @@ -0,0 +1,102 @@ +//==--------- DynamicAllocator.h - Dynamic allocations ------------*- C++ -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_INTERP_DYNAMIC_ALLOCATOR_H +#define LLVM_CLANG_AST_INTERP_DYNAMIC_ALLOCATOR_H + +#include "Descriptor.h" +#include "InterpBlock.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/iterator_range.h" +#include "llvm/Support/Allocator.h" + +namespace clang { +class Expr; +namespace interp { +class Block; +class InterpState; + +/// Manages dynamic memory allocations done during bytecode interpretation. +/// +/// We manage allocations as a map from their new-expression to a list +/// of allocations. This is called an AllocationSite. For each site, we +/// record whether it was allocated using new or new[], the +/// IsArrayAllocation flag. 
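To make the bookkeeping concrete, the following is a minimal, self-contained analogue of the site map sketched above. It uses standard containers in place of llvm::DenseMap/SmallVector and an opaque const void * key in place of the new-expression; recordAllocation, AllocationSketch and AllocationSiteSketch are invented names used only for illustration.

#include <cstddef>
#include <map>
#include <memory>
#include <vector>

// One raw allocation made from a given new-expression.
struct AllocationSketch {
  std::unique_ptr<std::byte[]> Memory;
};

// All allocations originating from the same new-expression.
struct AllocationSiteSketch {
  std::vector<AllocationSketch> Allocations;
  bool IsArrayAllocation = false;
};

// Evaluating the same new-expression repeatedly (e.g. in a constexpr loop)
// appends to the one existing site instead of creating a new map entry.
std::map<const void *, AllocationSiteSketch> AllocationSites;

void recordAllocation(const void *NewExpr, std::size_t Bytes, bool IsArray) {
  AllocationSiteSketch &Site = AllocationSites[NewExpr];
  Site.IsArrayAllocation = IsArray;
  Site.Allocations.push_back({std::make_unique<std::byte[]>(Bytes)});
}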
+/// +/// For all array allocations, we need to allocate new Descriptor instances, +/// so the DynamicAllocator has a llvm::BumpPtrAllocator similar to Program. +class DynamicAllocator final { + struct Allocation { + std::unique_ptr<std::byte[]> Memory; + Allocation(std::unique_ptr<std::byte[]> Memory) + : Memory(std::move(Memory)) {} + }; + + struct AllocationSite { + llvm::SmallVector<Allocation> Allocations; + bool IsArrayAllocation = false; + + AllocationSite(std::unique_ptr<std::byte[]> Memory, bool Array) + : IsArrayAllocation(Array) { + Allocations.push_back({std::move(Memory)}); + } + + size_t size() const { return Allocations.size(); } + }; + +public: + DynamicAllocator() = default; + ~DynamicAllocator(); + + void cleanup(); + + unsigned getNumAllocations() const { return AllocationSites.size(); } + + /// Allocate ONE element of the given descriptor. + Block *allocate(const Descriptor *D, unsigned EvalID); + /// Allocate \p NumElements primitive elements of the given type. + Block *allocate(const Expr *Source, PrimType T, size_t NumElements, + unsigned EvalID); + /// Allocate \p NumElements elements of the given descriptor. + Block *allocate(const Descriptor *D, size_t NumElements, unsigned EvalID); + + /// Deallocate the given source+block combination. + /// Returns \c true if anything has been deallocatd, \c false otherwise. + bool deallocate(const Expr *Source, const Block *BlockToDelete, + InterpState &S); + + /// Checks whether the allocation done at the given source is an array + /// allocation. + bool isArrayAllocation(const Expr *Source) const { + if (auto It = AllocationSites.find(Source); It != AllocationSites.end()) + return It->second.IsArrayAllocation; + return false; + } + + /// Allocation site iterator. + using const_virtual_iter = + llvm::DenseMap<const Expr *, AllocationSite>::const_iterator; + llvm::iterator_range<const_virtual_iter> allocation_sites() const { + return llvm::make_range(AllocationSites.begin(), AllocationSites.end()); + } + +private: + llvm::DenseMap<const Expr *, AllocationSite> AllocationSites; + + using PoolAllocTy = llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator>; + PoolAllocTy DescAllocator; + + /// Allocates a new descriptor. + template <typename... Ts> Descriptor *allocateDescriptor(Ts &&...Args) { + return new (DescAllocator) Descriptor(std::forward<Ts>(Args)...); + } +}; + +} // namespace interp +} // namespace clang +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp index 22e8695b9211..08536536ac3c 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp @@ -8,38 +8,72 @@ #include "EvalEmitter.h" #include "Context.h" +#include "IntegralAP.h" #include "Interp.h" #include "Opcode.h" -#include "Program.h" #include "clang/AST/DeclCXX.h" using namespace clang; using namespace clang::interp; -using APSInt = llvm::APSInt; -template <typename T> using Expected = llvm::Expected<T>; - EvalEmitter::EvalEmitter(Context &Ctx, Program &P, State &Parent, - InterpStack &Stk, APValue &Result) - : Ctx(Ctx), P(P), S(Parent, P, Stk, Ctx, this), Result(Result) { + InterpStack &Stk) + : Ctx(Ctx), P(P), S(Parent, P, Stk, Ctx, this), EvalResult(&Ctx) { // Create a dummy frame for the interpreter which does not have locals. 
- S.Current = new InterpFrame(S, nullptr, nullptr, CodePtr(), Pointer()); + S.Current = + new InterpFrame(S, /*Func=*/nullptr, /*Caller=*/nullptr, CodePtr(), 0); } -llvm::Expected<bool> EvalEmitter::interpretExpr(const Expr *E) { - if (this->visitExpr(E)) - return true; - if (BailLocation) - return llvm::make_error<ByteCodeGenError>(*BailLocation); - return false; +EvalEmitter::~EvalEmitter() { + for (auto &[K, V] : Locals) { + Block *B = reinterpret_cast<Block *>(V.get()); + if (B->isInitialized()) + B->invokeDtor(); + } } -llvm::Expected<bool> EvalEmitter::interpretDecl(const VarDecl *VD) { - if (this->visitDecl(VD)) - return true; - if (BailLocation) - return llvm::make_error<ByteCodeGenError>(*BailLocation); - return false; +/// Clean up all our resources. This needs to done in failed evaluations before +/// we call InterpStack::clear(), because there might be a Pointer on the stack +/// pointing into a Block in the EvalEmitter. +void EvalEmitter::cleanup() { S.cleanup(); } + +EvaluationResult EvalEmitter::interpretExpr(const Expr *E, + bool ConvertResultToRValue) { + S.setEvalLocation(E->getExprLoc()); + this->ConvertResultToRValue = ConvertResultToRValue && !isa<ConstantExpr>(E); + this->CheckFullyInitialized = isa<ConstantExpr>(E); + EvalResult.setSource(E); + + if (!this->visitExpr(E)) { + // EvalResult may already have a result set, but something failed + // after that (e.g. evaluating destructors). + EvalResult.setInvalid(); + } + + return std::move(this->EvalResult); +} + +EvaluationResult EvalEmitter::interpretDecl(const VarDecl *VD, + bool CheckFullyInitialized) { + this->CheckFullyInitialized = CheckFullyInitialized; + S.EvaluatingDecl = VD; + EvalResult.setSource(VD); + + if (const Expr *Init = VD->getAnyInitializer()) { + QualType T = VD->getType(); + this->ConvertResultToRValue = !Init->isGLValue() && !T->isPointerType() && + !T->isObjCObjectPointerType(); + } else + this->ConvertResultToRValue = false; + + EvalResult.setSource(VD); + + if (!this->visitDeclAndReturn(VD, S.inConstantContext())) + EvalResult.setInvalid(); + + S.EvaluatingDecl = nullptr; + updateGlobalTemporaries(); + return std::move(this->EvalResult); } void EvalEmitter::emitLabel(LabelTy Label) { @@ -51,21 +85,25 @@ EvalEmitter::LabelTy EvalEmitter::getLabel() { return NextLabel++; } Scope::Local EvalEmitter::createLocal(Descriptor *D) { // Allocate memory for a local. auto Memory = std::make_unique<char[]>(sizeof(Block) + D->getAllocSize()); - auto *B = new (Memory.get()) Block(D, /*isStatic=*/false); + auto *B = new (Memory.get()) Block(Ctx.getEvalID(), D, /*isStatic=*/false); B->invokeCtor(); + // Initialize local variable inline descriptor. + InlineDescriptor &Desc = *reinterpret_cast<InlineDescriptor *>(B->rawData()); + Desc.Desc = D; + Desc.Offset = sizeof(InlineDescriptor); + Desc.IsActive = true; + Desc.IsBase = false; + Desc.IsFieldMutable = false; + Desc.IsConst = false; + Desc.IsInitialized = false; + // Register the local. 
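The allocation above lays every local out in one contiguous buffer: the Block header first, then the block's data area, whose leading bytes are the InlineDescriptor metadata and whose remainder holds the value itself; this is why the Pointer pushed for the local later starts at offset sizeof(InlineDescriptor). A rough, self-contained sketch of that shape, with LocalHeader and LocalMetadata as invented placeholders for the real Block and InlineDescriptor:

#include <cstddef>
#include <memory>
#include <new>

struct LocalHeader   { unsigned EvalID = 0; };        // placeholder for interp::Block
struct LocalMetadata { bool IsInitialized = false; };  // placeholder for InlineDescriptor

// One buffer per local: [header][metadata][payload ...]
std::unique_ptr<std::byte[]> makeLocal(std::size_t PayloadSize) {
  auto Memory = std::make_unique<std::byte[]>(
      sizeof(LocalHeader) + sizeof(LocalMetadata) + PayloadSize);
  new (Memory.get()) LocalHeader{};                          // header
  new (Memory.get() + sizeof(LocalHeader)) LocalMetadata{};  // metadata
  // The payload starts at sizeof(LocalHeader) + sizeof(LocalMetadata),
  // mirroring the sizeof(InlineDescriptor) offset used for the Pointer.
  return Memory;
}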
unsigned Off = Locals.size(); Locals.insert({Off, std::move(Memory)}); return {Off, D}; } -bool EvalEmitter::bail(const SourceLocation &Loc) { - if (!BailLocation) - BailLocation = Loc; - return false; -} - bool EvalEmitter::jumpTrue(const LabelTy &Label) { if (isActive()) { if (S.Stk.pop<bool>()) @@ -95,111 +133,104 @@ bool EvalEmitter::fallthrough(const LabelTy &Label) { return true; } +static bool checkReturnState(InterpState &S) { + return S.maybeDiagnoseDanglingAllocations(); +} + template <PrimType OpType> bool EvalEmitter::emitRet(const SourceInfo &Info) { if (!isActive()) return true; + + if (!checkReturnState(S)) + return false; + using T = typename PrimConv<OpType>::T; - return ReturnValue<T>(S.Stk.pop<T>(), Result); + EvalResult.setValue(S.Stk.pop<T>().toAPValue(Ctx.getASTContext())); + return true; } -bool EvalEmitter::emitRetVoid(const SourceInfo &Info) { return true; } +template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) { + if (!isActive()) + return true; -bool EvalEmitter::emitRetValue(const SourceInfo &Info) { - // Method to recursively traverse composites. - std::function<bool(QualType, const Pointer &, APValue &)> Composite; - Composite = [this, &Composite](QualType Ty, const Pointer &Ptr, APValue &R) { - if (auto *AT = Ty->getAs<AtomicType>()) - Ty = AT->getValueType(); - - if (auto *RT = Ty->getAs<RecordType>()) { - auto *Record = Ptr.getRecord(); - assert(Record && "Missing record descriptor"); - - bool Ok = true; - if (RT->getDecl()->isUnion()) { - const FieldDecl *ActiveField = nullptr; - APValue Value; - for (auto &F : Record->fields()) { - const Pointer &FP = Ptr.atField(F.Offset); - QualType FieldTy = F.Decl->getType(); - if (FP.isActive()) { - if (llvm::Optional<PrimType> T = Ctx.classify(FieldTy)) { - TYPE_SWITCH(*T, Ok &= ReturnValue<T>(FP.deref<T>(), Value)); - } else { - Ok &= Composite(FieldTy, FP, Value); - } - break; - } - } - R = APValue(ActiveField, Value); - } else { - unsigned NF = Record->getNumFields(); - unsigned NB = Record->getNumBases(); - unsigned NV = Ptr.isBaseClass() ? 0 : Record->getNumVirtualBases(); - - R = APValue(APValue::UninitStruct(), NB, NF); - - for (unsigned I = 0; I < NF; ++I) { - const Record::Field *FD = Record->getField(I); - QualType FieldTy = FD->Decl->getType(); - const Pointer &FP = Ptr.atField(FD->Offset); - APValue &Value = R.getStructField(I); - - if (llvm::Optional<PrimType> T = Ctx.classify(FieldTy)) { - TYPE_SWITCH(*T, Ok &= ReturnValue<T>(FP.deref<T>(), Value)); - } else { - Ok &= Composite(FieldTy, FP, Value); - } - } - - for (unsigned I = 0; I < NB; ++I) { - const Record::Base *BD = Record->getBase(I); - QualType BaseTy = Ctx.getASTContext().getRecordType(BD->Decl); - const Pointer &BP = Ptr.atField(BD->Offset); - Ok &= Composite(BaseTy, BP, R.getStructBase(I)); - } - - for (unsigned I = 0; I < NV; ++I) { - const Record::Base *VD = Record->getVirtualBase(I); - QualType VirtBaseTy = Ctx.getASTContext().getRecordType(VD->Decl); - const Pointer &VP = Ptr.atField(VD->Offset); - Ok &= Composite(VirtBaseTy, VP, R.getStructBase(NB + I)); - } - } - return Ok; + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (!EvalResult.checkReturnValue(S, Ctx, Ptr, Info)) + return false; + if (CheckFullyInitialized && !EvalResult.checkFullyInitialized(S, Ptr)) + return false; + + if (!checkReturnState(S)) + return false; + + // Implicitly convert lvalue to rvalue, if requested. 
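  // As an illustration of when this matters (Pair and the initializers below
  // are invented examples): for
  //
  //   struct Pair { int a; int b; };
  //   constexpr Pair p = Pair{1, 2};
  //
  // the interpreter's result is a Pointer to its own copy of the Pair, while
  // the caller wants an APValue rvalue describing {1, 2}; that is what the
  // toRValue() call below produces. When the result is a glvalue (e.g. the
  // initializer of a constexpr reference), no conversion is requested and the
  // Pointer itself is kept as an lvalue.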
+ if (ConvertResultToRValue) { + if (!Ptr.isZero() && !Ptr.isDereferencable()) + return false; + // Never allow reading from a non-const pointer, unless the memory + // has been created in this evaluation. + if (!Ptr.isZero() && Ptr.isBlockPointer() && + Ptr.block()->getEvalID() != Ctx.getEvalID() && + (!CheckLoad(S, OpPC, Ptr, AK_Read) || !Ptr.isConst())) + return false; + + if (std::optional<APValue> V = + Ptr.toRValue(Ctx, EvalResult.getSourceType())) { + EvalResult.setValue(*V); + } else { + return false; } - if (auto *AT = Ty->getAsArrayTypeUnsafe()) { - const size_t NumElems = Ptr.getNumElems(); - QualType ElemTy = AT->getElementType(); - R = APValue(APValue::UninitArray{}, NumElems, NumElems); - - bool Ok = true; - for (unsigned I = 0; I < NumElems; ++I) { - APValue &Slot = R.getArrayInitializedElt(I); - const Pointer &EP = Ptr.atIndex(I); - if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) { - TYPE_SWITCH(*T, Ok &= ReturnValue<T>(EP.deref<T>(), Slot)); - } else { - Ok &= Composite(ElemTy, EP.narrow(), Slot); - } - } - return Ok; - } - llvm_unreachable("invalid value to return"); - }; + } else { + EvalResult.setValue(Ptr.toAPValue(Ctx.getASTContext())); + } + + return true; +} +template <> bool EvalEmitter::emitRet<PT_FnPtr>(const SourceInfo &Info) { + if (!isActive()) + return true; - // Return the composite type. + if (!checkReturnState(S)) + return false; + // Function pointers cannot be converted to rvalues. + EvalResult.setFunctionPointer(S.Stk.pop<FunctionPointer>()); + return true; +} + +bool EvalEmitter::emitRetVoid(const SourceInfo &Info) { + if (!checkReturnState(S)) + return false; + EvalResult.setValid(); + return true; +} + +bool EvalEmitter::emitRetValue(const SourceInfo &Info) { const auto &Ptr = S.Stk.pop<Pointer>(); - return Composite(Ptr.getType(), Ptr, Result); + + if (!EvalResult.checkReturnValue(S, Ctx, Ptr, Info)) + return false; + if (CheckFullyInitialized && !EvalResult.checkFullyInitialized(S, Ptr)) + return false; + + if (!checkReturnState(S)) + return false; + + if (std::optional<APValue> APV = + Ptr.toRValue(S.getCtx(), EvalResult.getSourceType())) { + EvalResult.setValue(*APV); + return true; + } + + EvalResult.setInvalid(); + return false; } bool EvalEmitter::emitGetPtrLocal(uint32_t I, const SourceInfo &Info) { if (!isActive()) return true; - auto It = Locals.find(I); - assert(It != Locals.end() && "Missing local variable"); - S.Stk.push<Pointer>(reinterpret_cast<Block *>(It->second.get())); + Block *B = getLocal(I); + S.Stk.push<Pointer>(B, sizeof(InlineDescriptor)); return true; } @@ -210,10 +241,8 @@ bool EvalEmitter::emitGetLocal(uint32_t I, const SourceInfo &Info) { using T = typename PrimConv<OpType>::T; - auto It = Locals.find(I); - assert(It != Locals.end() && "Missing local variable"); - auto *B = reinterpret_cast<Block *>(It->second.get()); - S.Stk.push<T>(*reinterpret_cast<T *>(B + 1)); + Block *B = getLocal(I); + S.Stk.push<T>(*reinterpret_cast<T *>(B->data())); return true; } @@ -224,10 +253,11 @@ bool EvalEmitter::emitSetLocal(uint32_t I, const SourceInfo &Info) { using T = typename PrimConv<OpType>::T; - auto It = Locals.find(I); - assert(It != Locals.end() && "Missing local variable"); - auto *B = reinterpret_cast<Block *>(It->second.get()); - *reinterpret_cast<T *>(B + 1) = S.Stk.pop<T>(); + Block *B = getLocal(I); + *reinterpret_cast<T *>(B->data()) = S.Stk.pop<T>(); + InlineDescriptor &Desc = *reinterpret_cast<InlineDescriptor *>(B->rawData()); + Desc.IsInitialized = true; + return true; } @@ -236,14 +266,37 @@ bool 
EvalEmitter::emitDestroy(uint32_t I, const SourceInfo &Info) { return true; for (auto &Local : Descriptors[I]) { - auto It = Locals.find(Local.Offset); - assert(It != Locals.end() && "Missing local variable"); - S.deallocate(reinterpret_cast<Block *>(It->second.get())); + Block *B = getLocal(Local.Offset); + S.deallocate(B); } return true; } +/// Global temporaries (LifetimeExtendedTemporary) carry their value +/// around as an APValue, which codegen accesses. +/// We set their value once when creating them, but we don't update it +/// afterwards when code changes it later. +/// This is what we do here. +void EvalEmitter::updateGlobalTemporaries() { + for (const auto &[E, Temp] : S.SeenGlobalTemporaries) { + if (std::optional<unsigned> GlobalIndex = P.getGlobal(E)) { + const Pointer &Ptr = P.getPtrGlobal(*GlobalIndex); + APValue *Cached = Temp->getOrCreateValue(true); + + if (std::optional<PrimType> T = Ctx.classify(E->getType())) { + TYPE_SWITCH( + *T, { *Cached = Ptr.deref<T>().toAPValue(Ctx.getASTContext()); }); + } else { + if (std::optional<APValue> APV = + Ptr.toRValue(Ctx, Temp->getTemporaryExpr()->getType())) + *Cached = *APV; + } + } + } + S.SeenGlobalTemporaries.clear(); +} + //===----------------------------------------------------------------------===// // Opcode evaluators //===----------------------------------------------------------------------===// diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h index eec2ff8ee753..338786d3dea9 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h @@ -13,23 +13,18 @@ #ifndef LLVM_CLANG_AST_INTERP_EVALEMITTER_H #define LLVM_CLANG_AST_INTERP_EVALEMITTER_H -#include "ByteCodeGenError.h" -#include "Context.h" -#include "InterpStack.h" +#include "EvaluationResult.h" #include "InterpState.h" #include "PrimType.h" -#include "Program.h" #include "Source.h" #include "llvm/Support/Error.h" namespace clang { -class FunctionDecl; namespace interp { class Context; class Function; -class InterpState; +class InterpStack; class Program; -class SourceInfo; enum Opcode : uint32_t; /// An emitter which evaluates opcodes as they are emitted. @@ -39,14 +34,19 @@ public: using AddrTy = uintptr_t; using Local = Scope::Local; - llvm::Expected<bool> interpretExpr(const Expr *E); - llvm::Expected<bool> interpretDecl(const VarDecl *VD); + EvaluationResult interpretExpr(const Expr *E, + bool ConvertResultToRValue = false); + EvaluationResult interpretDecl(const VarDecl *VD, bool CheckFullyInitialized); + + /// Clean up all resources. + void cleanup(); + + InterpState &getState() { return S; } protected: - EvalEmitter(Context &Ctx, Program &P, State &Parent, InterpStack &Stk, - APValue &Result); + EvalEmitter(Context &Ctx, Program &P, State &Parent, InterpStack &Stk); - virtual ~EvalEmitter() {} + virtual ~EvalEmitter(); /// Define a label. void emitLabel(LabelTy Label); @@ -55,11 +55,8 @@ protected: /// Methods implemented by the compiler. virtual bool visitExpr(const Expr *E) = 0; - virtual bool visitDecl(const VarDecl *VD) = 0; - - bool bail(const Stmt *S) { return bail(S->getBeginLoc()); } - bool bail(const Decl *D) { return bail(D->getBeginLoc()); } - bool bail(const SourceLocation &Loc); + virtual bool visitDeclAndReturn(const VarDecl *VD, bool ConstantContext) = 0; + virtual bool visitFunc(const FunctionDecl *F) = 0; /// Emits jumps. 
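The jump helpers declared below drive predicated execution: opcodes are always emitted and visited in source order, a jump merely changes which label is considered active, and every opcode checks isActive() before doing any work. A minimal self-contained sketch of that scheme (MiniEmitter and its members are invented names, not the real emitter):

#include <cstdint>
#include <iostream>

using LabelTy = uint32_t;

struct MiniEmitter {
  LabelTy NextLabel = 1, CurrentLabel = 0, ActiveLabel = 0;

  bool isActive() const { return CurrentLabel == ActiveLabel; }
  LabelTy getLabel() { return NextLabel++; }
  void emitLabel(LabelTy L) { CurrentLabel = L; }
  // A forward jump only changes the active label; nothing is re-executed.
  void jump(LabelTy L) { if (isActive()) ActiveLabel = L; }
  // An opcode in a skipped region sees isActive() == false and does nothing.
  void emitPrint(int V) { if (isActive()) std::cout << V << '\n'; }
};

int main() {
  MiniEmitter E;
  LabelTy Skip = E.getLabel();
  E.emitPrint(1);   // executes
  E.jump(Skip);     // deactivate everything until label `Skip`
  E.emitPrint(2);   // skipped
  E.emitLabel(Skip);
  E.emitPrint(3);   // executes again
}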
bool jumpTrue(const LabelTy &Label); @@ -67,16 +64,24 @@ protected: bool jump(const LabelTy &Label); bool fallthrough(const LabelTy &Label); + /// Since expressions can only jump forward, predicated execution is + /// used to deal with if-else statements. + bool isActive() const { return CurrentLabel == ActiveLabel; } + /// Callback for registering a local. Local createLocal(Descriptor *D); /// Returns the source location of the current opcode. - SourceInfo getSource(Function *F, CodePtr PC) const override { - return F ? F->getSource(PC) : CurrentSource; + SourceInfo getSource(const Function *F, CodePtr PC) const override { + return (F && F->hasBody()) ? F->getSource(PC) : CurrentSource; } /// Parameter indices. - llvm::DenseMap<const ParmVarDecl *, unsigned> Params; + llvm::DenseMap<const ParmVarDecl *, ParamOffset> Params; + /// Lambda captures. + llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures; + /// Offset of the This parameter in a lambda record. + ParamOffset LambdaThisCapture{0, false}; /// Local descriptors. llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors; @@ -88,16 +93,27 @@ private: /// Callee evaluation state. InterpState S; /// Location to write the result to. - APValue &Result; + EvaluationResult EvalResult; + /// Whether the result should be converted to an RValue. + bool ConvertResultToRValue = false; + /// Whether we should check if the result has been fully + /// initialized. + bool CheckFullyInitialized = false; /// Temporaries which require storage. llvm::DenseMap<unsigned, std::unique_ptr<char[]>> Locals; + Block *getLocal(unsigned Index) const { + auto It = Locals.find(Index); + assert(It != Locals.end() && "Missing local variable"); + return reinterpret_cast<Block *>(It->second.get()); + } + + void updateGlobalTemporaries(); + // The emitter always tracks the current instruction and sets OpPC to a token // value which is mapped to the location of the opcode being evaluated. CodePtr OpPC; - /// Location of a failure. - llvm::Optional<SourceLocation> BailLocation; /// Location of the current instruction. SourceInfo CurrentSource; @@ -108,15 +124,6 @@ private: /// Active block which should be executed. LabelTy ActiveLabel = 0; - /// Since expressions can only jump forward, predicated execution is - /// used to deal with if-else statements. - bool isActive() { return CurrentLabel == ActiveLabel; } - - /// Helper to invoke a method. - bool ExecuteCall(Function *F, Pointer &&This, const SourceInfo &Info); - /// Helper to emit a diagnostic on a missing method. - bool ExecuteNoCall(const FunctionDecl *F, const SourceInfo &Info); - protected: #define GET_EVAL_PROTO #include "Opcodes.inc" diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp new file mode 100644 index 000000000000..1b255711c7b3 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp @@ -0,0 +1,244 @@ +//===----- EvaluationResult.cpp - Result class for the VM ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "EvaluationResult.h" +#include "InterpState.h" +#include "Record.h" +#include "clang/AST/ExprCXX.h" +#include "llvm/ADT/SetVector.h" + +namespace clang { +namespace interp { + +APValue EvaluationResult::toAPValue() const { + assert(!empty()); + switch (Kind) { + case LValue: + // Either a pointer or a function pointer. + if (const auto *P = std::get_if<Pointer>(&Value)) + return P->toAPValue(Ctx->getASTContext()); + else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) + return FP->toAPValue(Ctx->getASTContext()); + else + llvm_unreachable("Unhandled LValue type"); + break; + case RValue: + return std::get<APValue>(Value); + case Valid: + return APValue(); + default: + llvm_unreachable("Unhandled result kind?"); + } +} + +std::optional<APValue> EvaluationResult::toRValue() const { + if (Kind == RValue) + return toAPValue(); + + assert(Kind == LValue); + + // We have a pointer and want an RValue. + if (const auto *P = std::get_if<Pointer>(&Value)) + return P->toRValue(*Ctx, getSourceType()); + else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope + return FP->toAPValue(Ctx->getASTContext()); + llvm_unreachable("Unhandled lvalue kind"); +} + +static void DiagnoseUninitializedSubobject(InterpState &S, SourceLocation Loc, + const FieldDecl *SubObjDecl) { + assert(SubObjDecl && "Subobject declaration does not exist"); + S.FFDiag(Loc, diag::note_constexpr_uninitialized) + << /*(name)*/ 1 << SubObjDecl; + S.Note(SubObjDecl->getLocation(), + diag::note_constexpr_subobject_declared_here); +} + +static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc, + const Pointer &BasePtr, const Record *R); + +static bool CheckArrayInitialized(InterpState &S, SourceLocation Loc, + const Pointer &BasePtr, + const ConstantArrayType *CAT) { + bool Result = true; + size_t NumElems = CAT->getZExtSize(); + QualType ElemType = CAT->getElementType(); + + if (ElemType->isRecordType()) { + const Record *R = BasePtr.getElemRecord(); + for (size_t I = 0; I != NumElems; ++I) { + Pointer ElemPtr = BasePtr.atIndex(I).narrow(); + Result &= CheckFieldsInitialized(S, Loc, ElemPtr, R); + } + } else if (const auto *ElemCAT = dyn_cast<ConstantArrayType>(ElemType)) { + for (size_t I = 0; I != NumElems; ++I) { + Pointer ElemPtr = BasePtr.atIndex(I).narrow(); + Result &= CheckArrayInitialized(S, Loc, ElemPtr, ElemCAT); + } + } else { + for (size_t I = 0; I != NumElems; ++I) { + if (!BasePtr.atIndex(I).isInitialized()) { + DiagnoseUninitializedSubobject(S, Loc, BasePtr.getField()); + Result = false; + } + } + } + + return Result; +} + +static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc, + const Pointer &BasePtr, const Record *R) { + assert(R); + bool Result = true; + // Check all fields of this record are initialized. + for (const Record::Field &F : R->fields()) { + Pointer FieldPtr = BasePtr.atField(F.Offset); + QualType FieldType = F.Decl->getType(); + + // Don't check inactive union members. + if (R->isUnion() && !FieldPtr.isActive()) + continue; + + if (FieldType->isRecordType()) { + Result &= CheckFieldsInitialized(S, Loc, FieldPtr, FieldPtr.getRecord()); + } else if (FieldType->isIncompleteArrayType()) { + // Nothing to do here. + } else if (F.Decl->isUnnamedBitField()) { + // Nothing do do here. 
+ } else if (FieldType->isArrayType()) { + const auto *CAT = + cast<ConstantArrayType>(FieldType->getAsArrayTypeUnsafe()); + Result &= CheckArrayInitialized(S, Loc, FieldPtr, CAT); + } else if (!FieldPtr.isInitialized()) { + DiagnoseUninitializedSubobject(S, Loc, F.Decl); + Result = false; + } + } + + // Check Fields in all bases + for (const Record::Base &B : R->bases()) { + Pointer P = BasePtr.atField(B.Offset); + if (!P.isInitialized()) { + const Descriptor *Desc = BasePtr.getDeclDesc(); + if (Desc->asDecl()) + S.FFDiag(BasePtr.getDeclDesc()->asDecl()->getLocation(), + diag::note_constexpr_uninitialized_base) + << B.Desc->getType(); + else + S.FFDiag(BasePtr.getDeclDesc()->asExpr()->getExprLoc(), + diag::note_constexpr_uninitialized_base) + << B.Desc->getType(); + + return false; + } + Result &= CheckFieldsInitialized(S, Loc, P, B.R); + } + + // TODO: Virtual bases + + return Result; +} + +bool EvaluationResult::checkFullyInitialized(InterpState &S, + const Pointer &Ptr) const { + assert(Source); + assert(empty()); + + if (Ptr.isZero()) + return true; + + // We can't inspect dead pointers at all. Return true here so we can + // diagnose them later. + if (!Ptr.isLive()) + return true; + + SourceLocation InitLoc; + if (const auto *D = Source.dyn_cast<const Decl *>()) + InitLoc = cast<VarDecl>(D)->getAnyInitializer()->getExprLoc(); + else if (const auto *E = Source.dyn_cast<const Expr *>()) + InitLoc = E->getExprLoc(); + + if (const Record *R = Ptr.getRecord()) + return CheckFieldsInitialized(S, InitLoc, Ptr, R); + + if (const auto *CAT = dyn_cast_if_present<ConstantArrayType>( + Ptr.getType()->getAsArrayTypeUnsafe())) + return CheckArrayInitialized(S, InitLoc, Ptr, CAT); + + return true; +} + +static void collectBlocks(const Pointer &Ptr, + llvm::SetVector<const Block *> &Blocks) { + auto isUsefulPtr = [](const Pointer &P) -> bool { + return P.isLive() && !P.isZero() && !P.isDummy() && + !P.isUnknownSizeArray() && !P.isOnePastEnd() && P.isBlockPointer(); + }; + + if (!isUsefulPtr(Ptr)) + return; + + Blocks.insert(Ptr.block()); + + const Descriptor *Desc = Ptr.getFieldDesc(); + if (!Desc) + return; + + if (const Record *R = Desc->ElemRecord) { + for (const Record::Field &F : R->fields()) { + const Pointer &FieldPtr = Ptr.atField(F.Offset); + assert(FieldPtr.block() == Ptr.block()); + collectBlocks(FieldPtr, Blocks); + } + } else if (Desc->isPrimitive() && Desc->getPrimType() == PT_Ptr) { + const Pointer &Pointee = Ptr.deref<Pointer>(); + if (isUsefulPtr(Pointee) && !Blocks.contains(Pointee.block())) + collectBlocks(Pointee, Blocks); + + } else if (Desc->isPrimitiveArray() && Desc->getPrimType() == PT_Ptr) { + for (unsigned I = 0; I != Desc->getNumElems(); ++I) { + const Pointer &ElemPointee = Ptr.atIndex(I).deref<Pointer>(); + if (isUsefulPtr(ElemPointee) && !Blocks.contains(ElemPointee.block())) + collectBlocks(ElemPointee, Blocks); + } + } else if (Desc->isCompositeArray()) { + for (unsigned I = 0; I != Desc->getNumElems(); ++I) { + const Pointer &ElemPtr = Ptr.atIndex(I).narrow(); + collectBlocks(ElemPtr, Blocks); + } + } +} + +bool EvaluationResult::checkReturnValue(InterpState &S, const Context &Ctx, + const Pointer &Ptr, + const SourceInfo &Info) { + // Collect all blocks that this pointer (transitively) points to and + // return false if any of them is a dynamic block. 
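At the source level, this check enforces the C++20 rule that every allocation performed with constexpr new must be deleted before the evaluation ends, so no pointer into a dynamic block may survive into the result. A small example of what gets accepted and rejected (leak and roundTrip are invented names):

// Ill-formed: the allocation escapes the constant evaluation, so the
// result would point to a dynamically allocated block.
constexpr int *leak() { return new int(1); }
// constexpr int *p = leak();   // error: not a constant expression

// OK: the allocation is deleted before the evaluation finishes and only
// the stored value escapes.
constexpr int roundTrip() {
  int *q = new int(2);
  int v = *q;
  delete q;
  return v;
}
static_assert(roundTrip() == 2);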
+ llvm::SetVector<const Block *> Blocks; + + collectBlocks(Ptr, Blocks); + + for (const Block *B : Blocks) { + if (B->isDynamic()) { + assert(B->getDescriptor()); + assert(B->getDescriptor()->asExpr()); + + S.FFDiag(Info, diag::note_constexpr_dynamic_alloc) + << Ptr.getType()->isReferenceType() << !Ptr.isRoot(); + S.Note(B->getDescriptor()->asExpr()->getExprLoc(), + diag::note_constexpr_dynamic_alloc_here); + return false; + } + } + + return true; +} + +} // namespace interp +} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h new file mode 100644 index 000000000000..ef662e3779bc --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h @@ -0,0 +1,127 @@ +//===------ EvaluationResult.h - Result class for the VM -------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_INTERP_EVALUATION_RESULT_H +#define LLVM_CLANG_AST_INTERP_EVALUATION_RESULT_H + +#include "FunctionPointer.h" +#include "Pointer.h" +#include "clang/AST/APValue.h" +#include "clang/AST/Decl.h" +#include "clang/AST/Expr.h" +#include <optional> +#include <variant> + +namespace clang { +namespace interp { +class EvalEmitter; +class Context; + +/// Defines the result of an evaluation. +/// +/// The result might be in different forms--one of the pointer types, +/// an APValue, or nothing. +/// +/// We use this class to inspect and diagnose the result, as well as +/// convert it to the requested form. +class EvaluationResult final { +public: + enum ResultKind { + Empty, // Initial state. + LValue, // Result is an lvalue/pointer. + RValue, // Result is an rvalue. + Invalid, // Result is invalid. + Valid, // Result is valid and empty. + }; + + using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>; + +private: + const Context *Ctx = nullptr; + std::variant<std::monostate, Pointer, FunctionPointer, APValue> Value; + ResultKind Kind = Empty; + DeclTy Source = nullptr; // Currently only needed for dump(). + + EvaluationResult(ResultKind Kind) : Kind(Kind) { + // Leave everything empty. Can be used as an + // error marker or for void return values. + assert(Kind == Valid || Kind == Invalid); + } + + void setSource(DeclTy D) { Source = D; } + + void setValue(const APValue &V) { + // V could still be an LValue. + assert(empty()); + Value = std::move(V); + Kind = RValue; + } + void setPointer(const Pointer P) { + assert(empty()); + Value = P; + Kind = LValue; + } + void setFunctionPointer(const FunctionPointer &P) { + assert(empty()); + Value = P; + Kind = LValue; + } + void setInvalid() { + // We are NOT asserting empty() here, since setting it to invalid + // is allowed even if there is already a result. + Kind = Invalid; + } + void setValid() { + assert(empty()); + Kind = Valid; + } + +public: + EvaluationResult(const Context *Ctx) : Ctx(Ctx) {} + + bool empty() const { return Kind == Empty; } + bool isInvalid() const { return Kind == Invalid; } + bool isLValue() const { return Kind == LValue; } + bool isRValue() const { return Kind == RValue; } + + /// Returns an APValue for the evaluation result. The returned + /// APValue might be an LValue or RValue. 
+ APValue toAPValue() const; + + /// If the result is an LValue, convert that to an RValue + /// and return it. This may fail, e.g. if the result is an + /// LValue and we can't read from it. + std::optional<APValue> toRValue() const; + + /// Check that all subobjects of the given pointer have been initialized. + bool checkFullyInitialized(InterpState &S, const Pointer &Ptr) const; + /// Check that none of the blocks the given pointer (transitively) points + /// to are dynamically allocated. + bool checkReturnValue(InterpState &S, const Context &Ctx, const Pointer &Ptr, + const SourceInfo &Info); + + QualType getSourceType() const { + if (const auto *D = + dyn_cast_if_present<ValueDecl>(Source.dyn_cast<const Decl *>())) + return D->getType(); + else if (const auto *E = Source.dyn_cast<const Expr *>()) + return E->getType(); + return QualType(); + } + + /// Dump to stderr. + void dump() const; + + friend class EvalEmitter; + friend class InterpState; +}; + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp new file mode 100644 index 000000000000..922e17ad1450 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp @@ -0,0 +1,22 @@ +//===---- Floating.cpp - Support for floating point values ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "Floating.h" + +namespace clang { +namespace interp { + +llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Floating F) { + F.print(OS); + return OS; +} + +Floating getSwappedBytes(Floating F) { return F; } + +} // namespace interp +} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Floating.h b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h new file mode 100644 index 000000000000..114487821880 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h @@ -0,0 +1,218 @@ +//===--- Floating.h - Types for the constexpr VM ----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Defines the VM types and helpers operating on types. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_INTERP_FLOATING_H +#define LLVM_CLANG_AST_INTERP_FLOATING_H + +#include "Primitives.h" +#include "clang/AST/APValue.h" +#include "llvm/ADT/APFloat.h" + +namespace clang { +namespace interp { + +using APFloat = llvm::APFloat; +using APSInt = llvm::APSInt; + +class Floating final { +private: + // The underlying value storage. + APFloat F; + +public: + /// Zero-initializes a Floating. + Floating() : F(0.0f) {} + Floating(const APFloat &F) : F(F) {} + + // Static constructors for special floating point values. 
+ static Floating getInf(const llvm::fltSemantics &Sem) { + return Floating(APFloat::getInf(Sem)); + } + const APFloat &getAPFloat() const { return F; } + + bool operator<(Floating RHS) const { return F < RHS.F; } + bool operator>(Floating RHS) const { return F > RHS.F; } + bool operator<=(Floating RHS) const { return F <= RHS.F; } + bool operator>=(Floating RHS) const { return F >= RHS.F; } + bool operator==(Floating RHS) const { return F == RHS.F; } + bool operator!=(Floating RHS) const { return F != RHS.F; } + Floating operator-() const { return Floating(-F); } + + APFloat::opStatus convertToInteger(APSInt &Result) const { + bool IsExact; + return F.convertToInteger(Result, llvm::APFloat::rmTowardZero, &IsExact); + } + + Floating toSemantics(const llvm::fltSemantics *Sem, + llvm::RoundingMode RM) const { + APFloat Copy = F; + bool LosesInfo; + Copy.convert(*Sem, RM, &LosesInfo); + (void)LosesInfo; + return Floating(Copy); + } + + /// Convert this Floating to one with the same semantics as \Other. + Floating toSemantics(const Floating &Other, llvm::RoundingMode RM) const { + return toSemantics(&Other.F.getSemantics(), RM); + } + + APSInt toAPSInt(unsigned NumBits = 0) const { + return APSInt(F.bitcastToAPInt()); + } + APValue toAPValue(const ASTContext &) const { return APValue(F); } + void print(llvm::raw_ostream &OS) const { + // Can't use APFloat::print() since it appends a newline. + SmallVector<char, 16> Buffer; + F.toString(Buffer); + OS << Buffer; + } + std::string toDiagnosticString(const ASTContext &Ctx) const { + std::string NameStr; + llvm::raw_string_ostream OS(NameStr); + print(OS); + return NameStr; + } + + unsigned bitWidth() const { return F.semanticsSizeInBits(F.getSemantics()); } + + bool isSigned() const { return true; } + bool isNegative() const { return F.isNegative(); } + bool isPositive() const { return !F.isNegative(); } + bool isZero() const { return F.isZero(); } + bool isNonZero() const { return F.isNonZero(); } + bool isMin() const { return F.isSmallest(); } + bool isMinusOne() const { return F.isExactlyValue(-1.0); } + bool isNan() const { return F.isNaN(); } + bool isSignaling() const { return F.isSignaling(); } + bool isInf() const { return F.isInfinity(); } + bool isFinite() const { return F.isFinite(); } + bool isNormal() const { return F.isNormal(); } + bool isDenormal() const { return F.isDenormal(); } + llvm::FPClassTest classify() const { return F.classify(); } + APFloat::fltCategory getCategory() const { return F.getCategory(); } + + ComparisonCategoryResult compare(const Floating &RHS) const { + llvm::APFloatBase::cmpResult CmpRes = F.compare(RHS.F); + switch (CmpRes) { + case llvm::APFloatBase::cmpLessThan: + return ComparisonCategoryResult::Less; + case llvm::APFloatBase::cmpEqual: + return ComparisonCategoryResult::Equal; + case llvm::APFloatBase::cmpGreaterThan: + return ComparisonCategoryResult::Greater; + case llvm::APFloatBase::cmpUnordered: + return ComparisonCategoryResult::Unordered; + } + llvm_unreachable("Inavlid cmpResult value"); + } + + static APFloat::opStatus fromIntegral(APSInt Val, + const llvm::fltSemantics &Sem, + llvm::RoundingMode RM, + Floating &Result) { + APFloat F = APFloat(Sem); + APFloat::opStatus Status = F.convertFromAPInt(Val, Val.isSigned(), RM); + Result = Floating(F); + return Status; + } + + static Floating bitcastFromMemory(const std::byte *Buff, + const llvm::fltSemantics &Sem) { + size_t Size = APFloat::semanticsSizeInBits(Sem); + llvm::APInt API(Size, true); + llvm::LoadIntFromMemory(API, (const uint8_t 
*)Buff, Size / 8); + + return Floating(APFloat(Sem, API)); + } + + // === Serialization support === + size_t bytesToSerialize() const { + return sizeof(llvm::fltSemantics *) + + (APFloat::semanticsSizeInBits(F.getSemantics()) / 8); + } + + void serialize(std::byte *Buff) const { + // Semantics followed by an APInt. + *reinterpret_cast<const llvm::fltSemantics **>(Buff) = &F.getSemantics(); + + llvm::APInt API = F.bitcastToAPInt(); + llvm::StoreIntToMemory(API, (uint8_t *)(Buff + sizeof(void *)), + bitWidth() / 8); + } + + static Floating deserialize(const std::byte *Buff) { + const llvm::fltSemantics *Sem; + std::memcpy((void *)&Sem, Buff, sizeof(void *)); + return bitcastFromMemory(Buff + sizeof(void *), *Sem); + } + + static Floating abs(const Floating &F) { + APFloat V = F.F; + if (V.isNegative()) + V.changeSign(); + return Floating(V); + } + + // ------- + + static APFloat::opStatus add(const Floating &A, const Floating &B, + llvm::RoundingMode RM, Floating *R) { + *R = Floating(A.F); + return R->F.add(B.F, RM); + } + + static APFloat::opStatus increment(const Floating &A, llvm::RoundingMode RM, + Floating *R) { + APFloat One(A.F.getSemantics(), 1); + *R = Floating(A.F); + return R->F.add(One, RM); + } + + static APFloat::opStatus sub(const Floating &A, const Floating &B, + llvm::RoundingMode RM, Floating *R) { + *R = Floating(A.F); + return R->F.subtract(B.F, RM); + } + + static APFloat::opStatus decrement(const Floating &A, llvm::RoundingMode RM, + Floating *R) { + APFloat One(A.F.getSemantics(), 1); + *R = Floating(A.F); + return R->F.subtract(One, RM); + } + + static APFloat::opStatus mul(const Floating &A, const Floating &B, + llvm::RoundingMode RM, Floating *R) { + *R = Floating(A.F); + return R->F.multiply(B.F, RM); + } + + static APFloat::opStatus div(const Floating &A, const Floating &B, + llvm::RoundingMode RM, Floating *R) { + *R = Floating(A.F); + return R->F.divide(B.F, RM); + } + + static bool neg(const Floating &A, Floating *R) { + *R = -A; + return false; + } +}; + +llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Floating F); +Floating getSwappedBytes(Floating F); + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Frame.h b/contrib/llvm-project/clang/lib/AST/Interp/Frame.h index b9a0ea9412f8..079e4259b0ae 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Frame.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Frame.h @@ -27,13 +27,13 @@ public: virtual ~Frame(); /// Generates a human-readable description of the call site. - virtual void describe(llvm::raw_ostream &OS) = 0; + virtual void describe(llvm::raw_ostream &OS) const = 0; /// Returns a pointer to the caller frame. virtual Frame *getCaller() const = 0; /// Returns the location of the call site. - virtual SourceLocation getCallLocation() const = 0; + virtual SourceRange getCallRange() const = 0; /// Returns the called function's declaration. 
virtual const FunctionDecl *getCallee() const = 0; diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp index 0ed13a92aa38..00f5a1fced53 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp @@ -7,23 +7,25 @@ //===----------------------------------------------------------------------===// #include "Function.h" -#include "Program.h" #include "Opcode.h" +#include "Program.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" +#include "clang/Basic/Builtins.h" using namespace clang; using namespace clang::interp; Function::Function(Program &P, const FunctionDecl *F, unsigned ArgSize, - llvm::SmallVector<PrimType, 8> &&ParamTypes, - llvm::DenseMap<unsigned, ParamDescriptor> &&Params) + llvm::SmallVectorImpl<PrimType> &&ParamTypes, + llvm::DenseMap<unsigned, ParamDescriptor> &&Params, + llvm::SmallVectorImpl<unsigned> &&ParamOffsets, + bool HasThisPointer, bool HasRVO, bool UnevaluatedBuiltin) : P(P), Loc(F->getBeginLoc()), F(F), ArgSize(ArgSize), - ParamTypes(std::move(ParamTypes)), Params(std::move(Params)) {} - -CodePtr Function::getCodeBegin() const { return Code.data(); } - -CodePtr Function::getCodeEnd() const { return Code.data() + Code.size(); } + ParamTypes(std::move(ParamTypes)), Params(std::move(Params)), + ParamOffsets(std::move(ParamOffsets)), HasThisPointer(HasThisPointer), + HasRVO(HasRVO), Variadic(F->isVariadic()), + IsUnevaluatedBuiltin(UnevaluatedBuiltin) {} Function::ParamDescriptor Function::getParamDescriptor(unsigned Offset) const { auto It = Params.find(Offset); @@ -32,17 +34,19 @@ Function::ParamDescriptor Function::getParamDescriptor(unsigned Offset) const { } SourceInfo Function::getSource(CodePtr PC) const { + assert(PC >= getCodeBegin() && "PC does not belong to this function"); + assert(PC <= getCodeEnd() && "PC Does not belong to this function"); + assert(hasBody() && "Function has no body"); unsigned Offset = PC - getCodeBegin(); using Elem = std::pair<unsigned, SourceInfo>; - auto It = std::lower_bound(SrcMap.begin(), SrcMap.end(), Elem{Offset, {}}, - [](Elem A, Elem B) { return A.first < B.first; }); - if (It == SrcMap.end() || It->first != Offset) - llvm::report_fatal_error("missing source location"); + auto It = llvm::lower_bound(SrcMap, Elem{Offset, {}}, llvm::less_first()); + if (It == SrcMap.end()) + return SrcMap.back().second; return It->second; } bool Function::isVirtual() const { - if (auto *M = dyn_cast<CXXMethodDecl>(F)) + if (const auto *M = dyn_cast<CXXMethodDecl>(F)) return M->isVirtual(); return false; } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.h b/contrib/llvm-project/clang/lib/AST/Interp/Function.h index 28531f04b6e9..92bcd9692791 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Function.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.h @@ -15,8 +15,10 @@ #ifndef LLVM_CLANG_AST_INTERP_FUNCTION_H #define LLVM_CLANG_AST_INTERP_FUNCTION_H -#include "Pointer.h" +#include "Descriptor.h" #include "Source.h" +#include "clang/AST/ASTLambda.h" +#include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "llvm/Support/raw_ostream.h" @@ -24,12 +26,13 @@ namespace clang { namespace interp { class Program; class ByteCodeEmitter; +class Pointer; enum PrimType : uint32_t; /// Describes a scope block. /// /// The block gathers all the descriptors of the locals defined in this block. 
-class Scope { +class Scope final { public: /// Information about a local's storage. struct Local { @@ -43,7 +46,7 @@ public: Scope(LocalVectorTy &&Descriptors) : Descriptors(std::move(Descriptors)) {} - llvm::iterator_range<LocalVectorTy::iterator> locals() { + llvm::iterator_range<LocalVectorTy::const_iterator> locals() const { return llvm::make_range(Descriptors.begin(), Descriptors.end()); } @@ -56,74 +59,176 @@ private: /// /// Contains links to the bytecode of the function, as well as metadata /// describing all arguments and stack-local variables. -class Function { +/// +/// # Calling Convention +/// +/// When calling a function, all argument values must be on the stack. +/// +/// If the function has a This pointer (i.e. hasThisPointer() returns true, +/// the argument values need to be preceeded by a Pointer for the This object. +/// +/// If the function uses Return Value Optimization, the arguments (and +/// potentially the This pointer) need to be preceeded by a Pointer pointing +/// to the location to construct the returned value. +/// +/// After the function has been called, it will remove all arguments, +/// including RVO and This pointer, from the stack. +/// +class Function final { public: using ParamDescriptor = std::pair<PrimType, Descriptor *>; /// Returns the size of the function's local stack. unsigned getFrameSize() const { return FrameSize; } - /// Returns the size of the argument stackx + /// Returns the size of the argument stack. unsigned getArgSize() const { return ArgSize; } /// Returns a pointer to the start of the code. - CodePtr getCodeBegin() const; + CodePtr getCodeBegin() const { return Code.data(); } /// Returns a pointer to the end of the code. - CodePtr getCodeEnd() const; + CodePtr getCodeEnd() const { return Code.data() + Code.size(); } /// Returns the original FunctionDecl. const FunctionDecl *getDecl() const { return F; } - /// Returns the lcoation. + /// Returns the name of the function decl this code + /// was generated for. + const std::string getName() const { + if (!F) + return "<<expr>>"; + + return F->getQualifiedNameAsString(); + } + + /// Returns the location. SourceLocation getLoc() const { return Loc; } /// Returns a parameter descriptor. ParamDescriptor getParamDescriptor(unsigned Offset) const; /// Checks if the first argument is a RVO pointer. - bool hasRVO() const { return ParamTypes.size() != Params.size(); } + bool hasRVO() const { return HasRVO; } + + bool hasNonNullAttr() const { return getDecl()->hasAttr<NonNullAttr>(); } /// Range over the scope blocks. - llvm::iterator_range<llvm::SmallVector<Scope, 2>::iterator> scopes() { + llvm::iterator_range<llvm::SmallVector<Scope, 2>::const_iterator> + scopes() const { return llvm::make_range(Scopes.begin(), Scopes.end()); } /// Range over argument types. - using arg_reverse_iterator = SmallVectorImpl<PrimType>::reverse_iterator; - llvm::iterator_range<arg_reverse_iterator> args_reverse() { - return llvm::make_range(ParamTypes.rbegin(), ParamTypes.rend()); + using arg_reverse_iterator = + SmallVectorImpl<PrimType>::const_reverse_iterator; + llvm::iterator_range<arg_reverse_iterator> args_reverse() const { + return llvm::reverse(ParamTypes); } /// Returns a specific scope. Scope &getScope(unsigned Idx) { return Scopes[Idx]; } + const Scope &getScope(unsigned Idx) const { return Scopes[Idx]; } /// Returns the source information at a given PC. SourceInfo getSource(CodePtr PC) const; /// Checks if the function is valid to call in constexpr. 
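Tying the calling-convention comment above to the accessors that follow: for a hypothetical member call `w.resize(3, 4)` whose return value is constructed via RVO, the stack holds, from deepest to most recent, the RVO pointer, the This pointer, and then the two written arguments, and the written-parameter count is recovered by subtracting the two implicit pointers again. A small sketch of that arithmetic (all names and counts invented):

#include <cstdio>

int main() {
  const bool HasThisPointer = true;    // non-static member function
  const bool HasRVO = true;            // returns a non-primitive value
  const unsigned NumWrittenParams = 2; // resize(int, int)
  const unsigned NumParams = NumWrittenParams + HasThisPointer + HasRVO;

  std::printf("stack before the call (deepest first): "
              "[RVO ptr][This ptr][arg0][arg1]\n");
  std::printf("ParamTypes entries: %u, written by the caller: %u\n",
              NumParams, NumParams - HasThisPointer - HasRVO);
}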
- bool isConstexpr() const { return IsValid; } + bool isConstexpr() const { return IsValid || isLambdaStaticInvoker(); } /// Checks if the function is virtual. bool isVirtual() const; /// Checks if the function is a constructor. bool isConstructor() const { return isa<CXXConstructorDecl>(F); } + /// Checks if the function is a destructor. + bool isDestructor() const { return isa<CXXDestructorDecl>(F); } + + /// Returns the parent record decl, if any. + const CXXRecordDecl *getParentDecl() const { + if (const auto *MD = dyn_cast<CXXMethodDecl>(F)) + return MD->getParent(); + return nullptr; + } + + /// Returns whether this function is a lambda static invoker, + /// which we generate custom byte code for. + bool isLambdaStaticInvoker() const { + if (const auto *MD = dyn_cast<CXXMethodDecl>(F)) + return MD->isLambdaStaticInvoker(); + return false; + } + + /// Returns whether this function is the call operator + /// of a lambda record decl. + bool isLambdaCallOperator() const { + if (const auto *MD = dyn_cast<CXXMethodDecl>(F)) + return clang::isLambdaCallOperator(MD); + return false; + } + + /// Checks if the function is fully done compiling. + bool isFullyCompiled() const { return IsFullyCompiled; } + + bool hasThisPointer() const { return HasThisPointer; } + + /// Checks if the function already has a body attached. + bool hasBody() const { return HasBody; } + + /// Checks if the function is defined. + bool isDefined() const { return Defined; } + + bool isVariadic() const { return Variadic; } + + unsigned getBuiltinID() const { return F->getBuiltinID(); } + + bool isBuiltin() const { return F->getBuiltinID() != 0; } + + bool isUnevaluatedBuiltin() const { return IsUnevaluatedBuiltin; } + + unsigned getNumParams() const { return ParamTypes.size(); } + + /// Returns the number of parameter this function takes when it's called, + /// i.e excluding the instance pointer and the RVO pointer. + unsigned getNumWrittenParams() const { + assert(getNumParams() >= (unsigned)(hasThisPointer() + hasRVO())); + return getNumParams() - hasThisPointer() - hasRVO(); + } + unsigned getWrittenArgSize() const { + return ArgSize - (align(primSize(PT_Ptr)) * (hasThisPointer() + hasRVO())); + } + + bool isThisPointerExplicit() const { + if (const auto *MD = dyn_cast<CXXMethodDecl>(F)) + return MD->isExplicitObjectMemberFunction(); + return false; + } + + unsigned getParamOffset(unsigned ParamIndex) const { + return ParamOffsets[ParamIndex]; + } private: /// Construct a function representing an actual function. Function(Program &P, const FunctionDecl *F, unsigned ArgSize, - llvm::SmallVector<PrimType, 8> &&ParamTypes, - llvm::DenseMap<unsigned, ParamDescriptor> &&Params); + llvm::SmallVectorImpl<PrimType> &&ParamTypes, + llvm::DenseMap<unsigned, ParamDescriptor> &&Params, + llvm::SmallVectorImpl<unsigned> &&ParamOffsets, bool HasThisPointer, + bool HasRVO, bool UnevaluatedBuiltin); /// Sets the code of a function. 
- void setCode(unsigned NewFrameSize, std::vector<char> &&NewCode, SourceMap &&NewSrcMap, - llvm::SmallVector<Scope, 2> &&NewScopes) { + void setCode(unsigned NewFrameSize, std::vector<std::byte> &&NewCode, + SourceMap &&NewSrcMap, llvm::SmallVector<Scope, 2> &&NewScopes, + bool NewHasBody) { FrameSize = NewFrameSize; Code = std::move(NewCode); SrcMap = std::move(NewSrcMap); Scopes = std::move(NewScopes); IsValid = true; + HasBody = NewHasBody; } + void setIsFullyCompiled(bool FC) { IsFullyCompiled = FC; } + void setDefined(bool D) { Defined = D; } + private: friend class Program; friend class ByteCodeEmitter; @@ -135,11 +240,11 @@ private: /// Declaration this function was compiled from. const FunctionDecl *F; /// Local area size: storage + metadata. - unsigned FrameSize; + unsigned FrameSize = 0; /// Size of the argument stack. unsigned ArgSize; /// Program code. - std::vector<char> Code; + std::vector<std::byte> Code; /// Opcode-to-expression mapping. SourceMap SrcMap; /// List of block descriptors. @@ -148,8 +253,25 @@ private: llvm::SmallVector<PrimType, 8> ParamTypes; /// Map from byte offset to parameter descriptor. llvm::DenseMap<unsigned, ParamDescriptor> Params; + /// List of parameter offsets. + llvm::SmallVector<unsigned, 8> ParamOffsets; /// Flag to indicate if the function is valid. bool IsValid = false; + /// Flag to indicate if the function is done being + /// compiled to bytecode. + bool IsFullyCompiled = false; + /// Flag indicating if this function takes the this pointer + /// as the first implicit argument + bool HasThisPointer = false; + /// Whether this function has Return Value Optimization, i.e. + /// the return value is constructed in the caller's stack frame. + /// This is done for functions that return non-primive values. + bool HasRVO = false; + /// If we've already compiled the function's body. + bool HasBody = false; + bool Defined = false; + bool Variadic = false; + bool IsUnevaluatedBuiltin = false; public: /// Dumps the disassembled bytecode to \c llvm::errs(). diff --git a/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h new file mode 100644 index 000000000000..0f2c6e571a1d --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h @@ -0,0 +1,95 @@ +//===--- FunctionPointer.h - Types for the constexpr VM ---------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_INTERP_FUNCTION_POINTER_H +#define LLVM_CLANG_AST_INTERP_FUNCTION_POINTER_H + +#include "Function.h" +#include "Primitives.h" +#include "clang/AST/APValue.h" + +namespace clang { +class ASTContext; +namespace interp { + +class FunctionPointer final { +private: + const Function *Func; + bool Valid; + +public: + FunctionPointer(const Function *Func) : Func(Func), Valid(true) { + assert(Func); + } + + FunctionPointer(uintptr_t IntVal = 0, const Descriptor *Desc = nullptr) + : Func(reinterpret_cast<const Function *>(IntVal)), Valid(false) {} + + const Function *getFunction() const { return Func; } + bool isZero() const { return !Func; } + bool isValid() const { return Valid; } + bool isWeak() const { + if (!Func || !Valid) + return false; + + return Func->getDecl()->isWeak(); + } + + APValue toAPValue(const ASTContext &) const { + if (!Func) + return APValue(static_cast<Expr *>(nullptr), CharUnits::Zero(), {}, + /*OnePastTheEnd=*/false, /*IsNull=*/true); + + if (!Valid) + return APValue(static_cast<Expr *>(nullptr), + CharUnits::fromQuantity(getIntegerRepresentation()), {}, + /*OnePastTheEnd=*/false, /*IsNull=*/false); + + return APValue(Func->getDecl(), CharUnits::Zero(), {}, + /*OnePastTheEnd=*/false, /*IsNull=*/false); + } + + void print(llvm::raw_ostream &OS) const { + OS << "FnPtr("; + if (Func && Valid) + OS << Func->getName(); + else if (Func) + OS << reinterpret_cast<uintptr_t>(Func); + else + OS << "nullptr"; + OS << ")"; + } + + std::string toDiagnosticString(const ASTContext &Ctx) const { + if (!Func) + return "nullptr"; + + return toAPValue(Ctx).getAsString(Ctx, Func->getDecl()->getType()); + } + + uint64_t getIntegerRepresentation() const { + return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Func)); + } + + ComparisonCategoryResult compare(const FunctionPointer &RHS) const { + if (Func == RHS.Func) + return ComparisonCategoryResult::Equal; + return ComparisonCategoryResult::Unordered; + } +}; + +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, + FunctionPointer FP) { + FP.print(OS); + return OS; +} + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h index 46cd611ee389..aafdd02676c9 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h @@ -21,21 +21,15 @@ #include <cstddef> #include <cstdint> +#include "Primitives.h" + namespace clang { namespace interp { using APInt = llvm::APInt; using APSInt = llvm::APSInt; -/// Helper to compare two comparable types. -template <typename T> -ComparisonCategoryResult Compare(const T &X, const T &Y) { - if (X < Y) - return ComparisonCategoryResult::Less; - if (X > Y) - return ComparisonCategoryResult::Greater; - return ComparisonCategoryResult::Equal; -} +template <bool Signed> class IntegralAP; // Helper structure to select the representation. template <unsigned Bits, bool Signed> struct Repr; @@ -53,22 +47,24 @@ template <> struct Repr<64, true> { using Type = int64_t; }; /// These wrappers are required to shared an interface between APSint and /// builtin primitive numeral types, while optimising for storage and /// allowing methods operating on primitive type to compile to fast code. 
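One thing these fixed-width wrappers make explicit is the sign-extending truncation applied when a value is narrowed to a smaller bit width; Integral::truncate() further down performs exactly the mask-and-extend computation shown here with plain integers (truncateTo is an invented stand-alone helper, not the class itself):

#include <cstdint>

// Sign-extending truncation of an 8-bit value to TruncBits bits, mirroring
// the BitMask/SignBit/ExtMask computation in Integral::truncate().
constexpr int8_t truncateTo(int8_t V, unsigned TruncBits) {
  const uint8_t U = static_cast<uint8_t>(V);
  const uint8_t BitMask = static_cast<uint8_t>((1u << TruncBits) - 1);
  const uint8_t SignBit = static_cast<uint8_t>(1u << (TruncBits - 1));
  const uint8_t ExtMask = static_cast<uint8_t>(~BitMask);
  return static_cast<int8_t>((U & BitMask) | ((U & SignBit) ? ExtMask : 0));
}

static_assert(truncateTo(5, 4) == 5);    // 0b0101 stays positive
static_assert(truncateTo(13, 4) == -3);  // 0b1101 sign-extends to -3
static_assert(truncateTo(-3, 4) == -3);  // already fits in 4 signed bits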
-template <unsigned Bits, bool Signed> class Integral { +template <unsigned Bits, bool Signed> class Integral final { private: template <unsigned OtherBits, bool OtherSigned> friend class Integral; // The primitive representing the integral. - using T = typename Repr<Bits, Signed>::Type; - T V; + using ReprT = typename Repr<Bits, Signed>::Type; + ReprT V; /// Primitive representing limits. - static const auto Min = std::numeric_limits<T>::min(); - static const auto Max = std::numeric_limits<T>::max(); + static const auto Min = std::numeric_limits<ReprT>::min(); + static const auto Max = std::numeric_limits<ReprT>::max(); /// Construct an integral from anything that is convertible to storage. template <typename T> explicit Integral(T V) : V(V) {} public: + using AsUnsigned = Integral<Bits, false>; + /// Zero-initializes an integral. Integral() : V(0) {} @@ -92,6 +88,9 @@ public: } Integral operator-() const { return Integral(-V); } + Integral operator-(const Integral &Other) const { + return Integral(V - Other.V); + } Integral operator~() const { return Integral(~V); } template <unsigned DstBits, bool DstSign> @@ -99,20 +98,21 @@ public: return Integral<DstBits, DstSign>(V); } - explicit operator unsigned() const { return V; } - explicit operator int64_t() const { return V; } - explicit operator uint64_t() const { return V; } + template <typename Ty, typename = std::enable_if_t<std::is_integral_v<Ty>>> + explicit operator Ty() const { + return V; + } APSInt toAPSInt() const { return APSInt(APInt(Bits, static_cast<uint64_t>(V), Signed), !Signed); } APSInt toAPSInt(unsigned NumBits) const { - if (Signed) + if constexpr (Signed) return APSInt(toAPSInt().sextOrTrunc(NumBits), !Signed); else return APSInt(toAPSInt().zextOrTrunc(NumBits), !Signed); } - APValue toAPValue() const { return APValue(toAPSInt()); } + APValue toAPValue(const ASTContext &) const { return APValue(toAPSInt()); } Integral<Bits, false> toUnsigned() const { return Integral<Bits, false>(*this); @@ -124,25 +124,36 @@ public: bool isMin() const { return *this == min(bitWidth()); } - bool isMinusOne() const { return Signed && V == T(-1); } + bool isMinusOne() const { return Signed && V == ReprT(-1); } constexpr static bool isSigned() { return Signed; } - bool isNegative() const { return V < T(0); } + bool isNegative() const { return V < ReprT(0); } bool isPositive() const { return !isNegative(); } ComparisonCategoryResult compare(const Integral &RHS) const { return Compare(V, RHS.V); } - unsigned countLeadingZeros() const { return llvm::countLeadingZeros<T>(V); } + std::string toDiagnosticString(const ASTContext &Ctx) const { + std::string NameStr; + llvm::raw_string_ostream OS(NameStr); + OS << V; + return NameStr; + } + + unsigned countLeadingZeros() const { + if constexpr (!Signed) + return llvm::countl_zero<ReprT>(V); + llvm_unreachable("Don't call countLeadingZeros() on signed types."); + } Integral truncate(unsigned TruncBits) const { if (TruncBits >= Bits) return *this; - const T BitMask = (T(1) << T(TruncBits)) - 1; - const T SignBit = T(1) << (TruncBits - 1); - const T ExtMask = ~BitMask; + const ReprT BitMask = (ReprT(1) << ReprT(TruncBits)) - 1; + const ReprT SignBit = ReprT(1) << (TruncBits - 1); + const ReprT ExtMask = ~BitMask; return Integral((V & BitMask) | (Signed && (V & SignBit) ? 
ExtMask : 0)); } @@ -155,9 +166,11 @@ public: return Integral(Max); } - template <typename T> - static std::enable_if_t<std::is_integral<T>::value, Integral> from(T Value) { - return Integral(Value); + template <typename ValT> static Integral from(ValT Value) { + if constexpr (std::is_integral<ValT>::value) + return Integral(Value); + else + return Integral::from(static_cast<Integral::ReprT>(Value)); } template <unsigned SrcBits, bool SrcSign> @@ -166,13 +179,6 @@ public: return Integral(Value.V); } - template <bool SrcSign> static Integral from(Integral<0, SrcSign> Value) { - if (SrcSign) - return Integral(Value.V.getSExtValue()); - else - return Integral(Value.V.getZExtValue()); - } - static Integral zero() { return from(0); } template <typename T> static Integral from(T Value, unsigned NumBits) { @@ -180,15 +186,15 @@ public: } static bool inRange(int64_t Value, unsigned NumBits) { - return CheckRange<T, Min, Max>(Value); + return CheckRange<ReprT, Min, Max>(Value); } static bool increment(Integral A, Integral *R) { - return add(A, Integral(T(1)), A.bitWidth(), R); + return add(A, Integral(ReprT(1)), A.bitWidth(), R); } static bool decrement(Integral A, Integral *R) { - return sub(A, Integral(T(1)), A.bitWidth(), R); + return sub(A, Integral(ReprT(1)), A.bitWidth(), R); } static bool add(Integral A, Integral B, unsigned OpBits, Integral *R) { @@ -203,56 +209,89 @@ public: return CheckMulUB(A.V, B.V, R->V); } -private: - template <typename T> - static std::enable_if_t<std::is_signed<T>::value, bool> CheckAddUB(T A, T B, - T &R) { - return llvm::AddOverflow<T>(A, B, R); + static bool rem(Integral A, Integral B, unsigned OpBits, Integral *R) { + *R = Integral(A.V % B.V); + return false; } - template <typename T> - static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckAddUB(T A, T B, - T &R) { - R = A + B; + static bool div(Integral A, Integral B, unsigned OpBits, Integral *R) { + *R = Integral(A.V / B.V); return false; } - template <typename T> - static std::enable_if_t<std::is_signed<T>::value, bool> CheckSubUB(T A, T B, - T &R) { - return llvm::SubOverflow<T>(A, B, R); + static bool bitAnd(Integral A, Integral B, unsigned OpBits, Integral *R) { + *R = Integral(A.V & B.V); + return false; } - template <typename T> - static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckSubUB(T A, T B, - T &R) { - R = A - B; + static bool bitOr(Integral A, Integral B, unsigned OpBits, Integral *R) { + *R = Integral(A.V | B.V); return false; } - template <typename T> - static std::enable_if_t<std::is_signed<T>::value, bool> CheckMulUB(T A, T B, - T &R) { - return llvm::MulOverflow<T>(A, B, R); + static bool bitXor(Integral A, Integral B, unsigned OpBits, Integral *R) { + *R = Integral(A.V ^ B.V); + return false; } - template <typename T> - static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckMulUB(T A, T B, - T &R) { - R = A * B; + static bool neg(Integral A, Integral *R) { + if (Signed && A.isMin()) + return true; + + *R = -A; return false; } - template <typename T, T Min, T Max> - static std::enable_if_t<std::is_signed<T>::value, bool> - CheckRange(int64_t V) { - return Min <= V && V <= Max; + static bool comp(Integral A, Integral *R) { + *R = Integral(~A.V); + return false; + } + + template <unsigned RHSBits, bool RHSSign> + static void shiftLeft(const Integral A, const Integral<RHSBits, RHSSign> B, + unsigned OpBits, Integral *R) { + *R = Integral::from(A.V << B.V, OpBits); + } + + template <unsigned RHSBits, bool RHSSign> + static void shiftRight(const Integral A, const 
Integral<RHSBits, RHSSign> B, + unsigned OpBits, Integral *R) { + *R = Integral::from(A.V >> B.V, OpBits); + } + +private: + template <typename T> static bool CheckAddUB(T A, T B, T &R) { + if constexpr (std::is_signed_v<T>) { + return llvm::AddOverflow<T>(A, B, R); + } else { + R = A + B; + return false; + } } - template <typename T, T Min, T Max> - static std::enable_if_t<std::is_unsigned<T>::value, bool> - CheckRange(int64_t V) { - return V >= 0 && static_cast<uint64_t>(V) <= Max; + template <typename T> static bool CheckSubUB(T A, T B, T &R) { + if constexpr (std::is_signed_v<T>) { + return llvm::SubOverflow<T>(A, B, R); + } else { + R = A - B; + return false; + } + } + + template <typename T> static bool CheckMulUB(T A, T B, T &R) { + if constexpr (std::is_signed_v<T>) { + return llvm::MulOverflow<T>(A, B, R); + } else { + R = A * B; + return false; + } + } + template <typename T, T Min, T Max> static bool CheckRange(int64_t V) { + if constexpr (std::is_signed_v<T>) { + return Min <= V && V <= Max; + } else { + return V >= 0 && static_cast<uint64_t>(V) <= Max; + } } }; diff --git a/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h b/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h new file mode 100644 index 000000000000..b8aa21038256 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h @@ -0,0 +1,328 @@ +//===--- Integral.h - Wrapper for numeric types for the VM ------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Defines the VM types and helpers operating on types. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_INTERP_INTEGRAL_AP_H +#define LLVM_CLANG_AST_INTERP_INTEGRAL_AP_H + +#include "clang/AST/APValue.h" +#include "clang/AST/ComparisonCategories.h" +#include "llvm/ADT/APSInt.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include <cstddef> +#include <cstdint> + +#include "Primitives.h" + +namespace clang { +namespace interp { + +using APInt = llvm::APInt; +using APSInt = llvm::APSInt; +template <unsigned Bits, bool Signed> class Integral; + +template <bool Signed> class IntegralAP final { +private: + friend IntegralAP<!Signed>; + APInt V; + + template <typename T, bool InputSigned> + static T truncateCast(const APInt &V) { + constexpr unsigned BitSize = sizeof(T) * 8; + if (BitSize >= V.getBitWidth()) { + APInt Extended; + if constexpr (InputSigned) + Extended = V.sext(BitSize); + else + Extended = V.zext(BitSize); + return std::is_signed_v<T> ? Extended.getSExtValue() + : Extended.getZExtValue(); + } + + return std::is_signed_v<T> ? V.trunc(BitSize).getSExtValue() + : V.trunc(BitSize).getZExtValue(); + } + +public: + using AsUnsigned = IntegralAP<false>; + + template <typename T> + IntegralAP(T Value, unsigned BitWidth) + : V(APInt(BitWidth, static_cast<uint64_t>(Value), Signed)) {} + + IntegralAP(APInt V) : V(V) {} + /// Arbitrary value for uninitialized variables. 
+ IntegralAP() : IntegralAP(-1, 3) {} + + IntegralAP operator-() const { return IntegralAP(-V); } + IntegralAP operator-(const IntegralAP &Other) const { + return IntegralAP(V - Other.V); + } + bool operator>(const IntegralAP &RHS) const { + if constexpr (Signed) + return V.ugt(RHS.V); + return V.sgt(RHS.V); + } + bool operator>=(IntegralAP RHS) const { + if constexpr (Signed) + return V.uge(RHS.V); + return V.sge(RHS.V); + } + bool operator<(IntegralAP RHS) const { + if constexpr (Signed) + return V.slt(RHS.V); + return V.slt(RHS.V); + } + bool operator<=(IntegralAP RHS) const { + if constexpr (Signed) + return V.ult(RHS.V); + return V.ult(RHS.V); + } + + template <typename Ty, typename = std::enable_if_t<std::is_integral_v<Ty>>> + explicit operator Ty() const { + return truncateCast<Ty, Signed>(V); + } + + template <typename T> static IntegralAP from(T Value, unsigned NumBits = 0) { + assert(NumBits > 0); + APInt Copy = APInt(NumBits, static_cast<uint64_t>(Value), Signed); + + return IntegralAP<Signed>(Copy); + } + + template <bool InputSigned> + static IntegralAP from(IntegralAP<InputSigned> V, unsigned NumBits = 0) { + if (NumBits == 0) + NumBits = V.bitWidth(); + + if constexpr (InputSigned) + return IntegralAP<Signed>(V.V.sextOrTrunc(NumBits)); + return IntegralAP<Signed>(V.V.zextOrTrunc(NumBits)); + } + + template <unsigned Bits, bool InputSigned> + static IntegralAP from(Integral<Bits, InputSigned> I, unsigned BitWidth) { + APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned); + + return IntegralAP<Signed>(Copy); + } + + static IntegralAP zero(int32_t BitWidth) { + APInt V = APInt(BitWidth, 0LL, Signed); + return IntegralAP(V); + } + + constexpr unsigned bitWidth() const { return V.getBitWidth(); } + + APSInt toAPSInt(unsigned Bits = 0) const { + if (Bits == 0) + Bits = bitWidth(); + + if constexpr (Signed) + return APSInt(V.sext(Bits), !Signed); + else + return APSInt(V.zext(Bits), !Signed); + } + APValue toAPValue(const ASTContext &) const { return APValue(toAPSInt()); } + + bool isZero() const { return V.isZero(); } + bool isPositive() const { return V.isNonNegative(); } + bool isNegative() const { return !V.isNonNegative(); } + bool isMin() const { return V.isMinValue(); } + bool isMax() const { return V.isMaxValue(); } + static constexpr bool isSigned() { return Signed; } + bool isMinusOne() const { return Signed && V == -1; } + + unsigned countLeadingZeros() const { return V.countl_zero(); } + + void print(llvm::raw_ostream &OS) const { OS << V; } + std::string toDiagnosticString(const ASTContext &Ctx) const { + std::string NameStr; + llvm::raw_string_ostream OS(NameStr); + print(OS); + return NameStr; + } + + IntegralAP truncate(unsigned BitWidth) const { + if constexpr (Signed) + return IntegralAP(V.trunc(BitWidth).sextOrTrunc(this->bitWidth())); + else + return IntegralAP(V.trunc(BitWidth).zextOrTrunc(this->bitWidth())); + } + + IntegralAP<false> toUnsigned() const { + APInt Copy = V; + return IntegralAP<false>(Copy); + } + + ComparisonCategoryResult compare(const IntegralAP &RHS) const { + assert(Signed == RHS.isSigned()); + assert(bitWidth() == RHS.bitWidth()); + if constexpr (Signed) { + if (V.slt(RHS.V)) + return ComparisonCategoryResult::Less; + if (V.sgt(RHS.V)) + return ComparisonCategoryResult::Greater; + return ComparisonCategoryResult::Equal; + } + + assert(!Signed); + if (V.ult(RHS.V)) + return ComparisonCategoryResult::Less; + if (V.ugt(RHS.V)) + return ComparisonCategoryResult::Greater; + return ComparisonCategoryResult::Equal; + } + + static 
bool increment(IntegralAP A, IntegralAP *R) { + IntegralAP<Signed> One(1, A.bitWidth()); + return add(A, One, A.bitWidth() + 1, R); + } + + static bool decrement(IntegralAP A, IntegralAP *R) { + IntegralAP<Signed> One(1, A.bitWidth()); + return sub(A, One, A.bitWidth() + 1, R); + } + + static bool add(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) { + return CheckAddSubMulUB<std::plus>(A, B, OpBits, R); + } + + static bool sub(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) { + return CheckAddSubMulUB<std::minus>(A, B, OpBits, R); + } + + static bool mul(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) { + return CheckAddSubMulUB<std::multiplies>(A, B, OpBits, R); + } + + static bool rem(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) { + if constexpr (Signed) + *R = IntegralAP(A.V.srem(B.V)); + else + *R = IntegralAP(A.V.urem(B.V)); + return false; + } + + static bool div(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) { + if constexpr (Signed) + *R = IntegralAP(A.V.sdiv(B.V)); + else + *R = IntegralAP(A.V.udiv(B.V)); + return false; + } + + static bool bitAnd(IntegralAP A, IntegralAP B, unsigned OpBits, + IntegralAP *R) { + *R = IntegralAP(A.V & B.V); + return false; + } + + static bool bitOr(IntegralAP A, IntegralAP B, unsigned OpBits, + IntegralAP *R) { + *R = IntegralAP(A.V | B.V); + return false; + } + + static bool bitXor(IntegralAP A, IntegralAP B, unsigned OpBits, + IntegralAP *R) { + *R = IntegralAP(A.V ^ B.V); + return false; + } + + static bool neg(const IntegralAP &A, IntegralAP *R) { + APInt AI = A.V; + AI.negate(); + *R = IntegralAP(AI); + return false; + } + + static bool comp(IntegralAP A, IntegralAP *R) { + *R = IntegralAP(~A.V); + return false; + } + + static void shiftLeft(const IntegralAP A, const IntegralAP B, unsigned OpBits, + IntegralAP *R) { + *R = IntegralAP(A.V.shl(B.V.getZExtValue())); + } + + static void shiftRight(const IntegralAP A, const IntegralAP B, + unsigned OpBits, IntegralAP *R) { + unsigned ShiftAmount = B.V.getZExtValue(); + if constexpr (Signed) + *R = IntegralAP(A.V.ashr(ShiftAmount)); + else + *R = IntegralAP(A.V.lshr(ShiftAmount)); + } + + // === Serialization support === + size_t bytesToSerialize() const { + // 4 bytes for the BitWidth followed by N bytes for the actual APInt. 
+ return sizeof(uint32_t) + (V.getBitWidth() / CHAR_BIT); + } + + void serialize(std::byte *Buff) const { + assert(V.getBitWidth() < std::numeric_limits<uint8_t>::max()); + uint32_t BitWidth = V.getBitWidth(); + + std::memcpy(Buff, &BitWidth, sizeof(uint32_t)); + llvm::StoreIntToMemory(V, (uint8_t *)(Buff + sizeof(uint32_t)), + BitWidth / CHAR_BIT); + } + + static IntegralAP<Signed> deserialize(const std::byte *Buff) { + uint32_t BitWidth; + std::memcpy(&BitWidth, Buff, sizeof(uint32_t)); + IntegralAP<Signed> Val(APInt(BitWidth, 0ull, !Signed)); + + llvm::LoadIntFromMemory(Val.V, (const uint8_t *)Buff + sizeof(uint32_t), + BitWidth / CHAR_BIT); + return Val; + } + +private: + template <template <typename T> class Op> + static bool CheckAddSubMulUB(const IntegralAP &A, const IntegralAP &B, + unsigned BitWidth, IntegralAP *R) { + if constexpr (!Signed) { + R->V = Op<APInt>{}(A.V, B.V); + return false; + } + + const APSInt &LHS = A.toAPSInt(); + const APSInt &RHS = B.toAPSInt(); + APSInt Value = Op<APSInt>{}(LHS.extend(BitWidth), RHS.extend(BitWidth)); + APSInt Result = Value.trunc(LHS.getBitWidth()); + R->V = Result; + + return Result.extend(BitWidth) != Value; + } +}; + +template <bool Signed> +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, + IntegralAP<Signed> I) { + I.print(OS); + return OS; +} + +template <bool Signed> +IntegralAP<Signed> getSwappedBytes(IntegralAP<Signed> F) { + return F; +} + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp index cec3f6d6160e..0f9eedc3f38e 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp @@ -1,4 +1,4 @@ -//===--- InterpState.cpp - Interpreter for the constexpr VM -----*- C++ -*-===// +//===------- Interp.cpp - Interpreter for the constexpr VM ------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
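The CheckAddSubMulUB helper above detects overflow for the arbitrary-precision case by performing the operation at a wider bit width, truncating back, and checking whether the truncated result still round-trips. A standalone sketch of the same widen/compute/truncate idea using fixed-width integers instead of llvm::APInt (illustration only, not the patch's code):

// Overflow occurred iff the truncated result no longer re-extends to the
// wide value. Same principle as CheckAddSubMulUB, with int32_t/int64_t
// standing in for APSInt at two bit widths.
#include <cstdint>
#include <iostream>

static bool addWithOverflowCheck(int32_t A, int32_t B, int32_t &R) {
  int64_t Wide = static_cast<int64_t>(A) + static_cast<int64_t>(B); // widened op
  R = static_cast<int32_t>(Wide);                                   // truncate
  return static_cast<int64_t>(R) != Wide;                           // re-extend, compare
}

int main() {
  int32_t R;
  std::cout << addWithOverflowCheck(INT32_MAX, 1, R) << '\n'; // 1: overflow
  std::cout << addWithOverflowCheck(20, 22, R) << '\n';       // 0: R == 42
}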
@@ -7,10 +7,9 @@ //===----------------------------------------------------------------------===// #include "Interp.h" -#include <limits> -#include <vector> #include "Function.h" #include "InterpFrame.h" +#include "InterpShared.h" #include "InterpStack.h" #include "Opcode.h" #include "PrimType.h" @@ -19,57 +18,18 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/CXXInheritance.h" +#include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "llvm/ADT/APSInt.h" +#include "llvm/ADT/StringExtras.h" +#include <limits> +#include <vector> using namespace clang; -using namespace clang::interp; - -//===----------------------------------------------------------------------===// -// Ret -//===----------------------------------------------------------------------===// - -template <PrimType Name, class T = typename PrimConv<Name>::T> -static bool Ret(InterpState &S, CodePtr &PC, APValue &Result) { - S.CallStackDepth--; - const T &Ret = S.Stk.pop<T>(); - - assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame"); - if (!S.checkingPotentialConstantExpression()) - S.Current->popArgs(); - - if (InterpFrame *Caller = S.Current->Caller) { - PC = S.Current->getRetPC(); - delete S.Current; - S.Current = Caller; - S.Stk.push<T>(Ret); - } else { - delete S.Current; - S.Current = nullptr; - if (!ReturnValue<T>(Ret, Result)) - return false; - } - return true; -} - -static bool RetVoid(InterpState &S, CodePtr &PC, APValue &Result) { - S.CallStackDepth--; - assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame"); - if (!S.checkingPotentialConstantExpression()) - S.Current->popArgs(); - - if (InterpFrame *Caller = S.Current->Caller) { - PC = S.Current->getRetPC(); - delete S.Current; - S.Current = Caller; - } else { - delete S.Current; - S.Current = nullptr; - } - return true; -} +using namespace clang; +using namespace clang::interp; static bool RetValue(InterpState &S, CodePtr &Pt, APValue &Result) { llvm::report_fatal_error("Interpreter cannot return values"); @@ -98,17 +58,70 @@ static bool Jf(InterpState &S, CodePtr &PC, int32_t Offset) { return true; } -static bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr, - AccessKinds AK) { - if (Ptr.isInitialized()) - return true; - if (!S.checkingPotentialConstantExpression()) { - const SourceInfo &Loc = S.Current->getSource(OpPC); - S.FFDiag(Loc, diag::note_constexpr_access_uninit) << AK << false; +static void diagnoseMissingInitializer(InterpState &S, CodePtr OpPC, + const ValueDecl *VD) { + const SourceInfo &E = S.Current->getSource(OpPC); + S.FFDiag(E, diag::note_constexpr_var_init_unknown, 1) << VD; + S.Note(VD->getLocation(), diag::note_declared_at) << VD->getSourceRange(); +} + +static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC, + const ValueDecl *VD); +static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC, + const ValueDecl *D) { + const SourceInfo &E = S.Current->getSource(OpPC); + + if (isa<ParmVarDecl>(D)) { + if (S.getLangOpts().CPlusPlus11) { + S.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << D; + S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange(); + } else { + S.FFDiag(E); + } + return false; } + + if (!D->getType().isConstQualified()) + diagnoseNonConstVariable(S, OpPC, D); + else if (const auto *VD = dyn_cast<VarDecl>(D); + VD && !VD->getAnyInitializer()) + diagnoseMissingInitializer(S, OpPC, VD); + return false; } +static void diagnoseNonConstVariable(InterpState 
&S, CodePtr OpPC, + const ValueDecl *VD) { + if (!S.getLangOpts().CPlusPlus) + return; + + const SourceInfo &Loc = S.Current->getSource(OpPC); + if (const auto *VarD = dyn_cast<VarDecl>(VD); + VarD && VarD->getType().isConstQualified() && + !VarD->getAnyInitializer()) { + diagnoseMissingInitializer(S, OpPC, VD); + return; + } + + // Rather random, but this is to match the diagnostic output of the current + // interpreter. + if (isa<ObjCIvarDecl>(VD)) + return; + + if (VD->getType()->isIntegralOrEnumerationType()) { + S.FFDiag(Loc, diag::note_constexpr_ltor_non_const_int, 1) << VD; + S.Note(VD->getLocation(), diag::note_declared_at); + return; + } + + S.FFDiag(Loc, + S.getLangOpts().CPlusPlus11 ? diag::note_constexpr_ltor_non_constexpr + : diag::note_constexpr_ltor_non_integral, + 1) + << VD << VD->getType(); + S.Note(VD->getLocation(), diag::note_declared_at); +} + static bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK) { if (Ptr.isActive()) @@ -124,7 +137,7 @@ static bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, } // Find the active field of the union. - Record *R = U.getRecord(); + const Record *R = U.getRecord(); assert(R && R->isUnion() && "Not a union"); const FieldDecl *ActiveField = nullptr; for (unsigned I = 0, N = R->getNumFields(); I < N; ++I) { @@ -177,16 +190,72 @@ static bool CheckGlobal(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { namespace clang { namespace interp { +static void popArg(InterpState &S, const Expr *Arg) { + PrimType Ty = S.getContext().classify(Arg).value_or(PT_Ptr); + TYPE_SWITCH(Ty, S.Stk.discard<T>()); +} + +void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC) { + assert(S.Current); + const Function *CurFunc = S.Current->getFunction(); + assert(CurFunc); + + if (CurFunc->isUnevaluatedBuiltin()) + return; + + // Some builtin functions require us to only look at the call site, since + // the classified parameter types do not match. + if (CurFunc->isBuiltin()) { + const auto *CE = + cast<CallExpr>(S.Current->Caller->getExpr(S.Current->getRetPC())); + for (int32_t I = CE->getNumArgs() - 1; I >= 0; --I) { + const Expr *A = CE->getArg(I); + popArg(S, A); + } + return; + } + + if (S.Current->Caller && CurFunc->isVariadic()) { + // CallExpr we're look for is at the return PC of the current function, i.e. + // in the caller. + // This code path should be executed very rarely. + unsigned NumVarArgs; + const Expr *const *Args = nullptr; + unsigned NumArgs = 0; + const Expr *CallSite = S.Current->Caller->getExpr(S.Current->getRetPC()); + if (const auto *CE = dyn_cast<CallExpr>(CallSite)) { + Args = CE->getArgs(); + NumArgs = CE->getNumArgs(); + } else if (const auto *CE = dyn_cast<CXXConstructExpr>(CallSite)) { + Args = CE->getArgs(); + NumArgs = CE->getNumArgs(); + } else + assert(false && "Can't get arguments from that expression type"); + + assert(NumArgs >= CurFunc->getNumWrittenParams()); + NumVarArgs = NumArgs - CurFunc->getNumWrittenParams(); + for (unsigned I = 0; I != NumVarArgs; ++I) { + const Expr *A = Args[NumArgs - 1 - I]; + popArg(S, A); + } + } + + // And in any case, remove the fixed parameters (the non-variadic ones) + // at the end. 
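Because a variadic callee only knows its written parameters, cleanupAfterFunctionCall above sizes the extra pops from the call site's argument list in the caller. A small sketch of that cleanup step, using a hypothetical operand-stack type rather than the interpreter's InterpStack:

// The number of extra operand-stack slots to discard is the call-site
// argument count minus the callee's declared (written) parameters.
#include <cassert>
#include <vector>

struct OperandStack {               // hypothetical stand-in for InterpStack
  std::vector<int> Slots;
  void discard() { assert(!Slots.empty()); Slots.pop_back(); }
};

static void cleanupVarArgs(OperandStack &Stk, unsigned CallSiteArgs,
                           unsigned WrittenParams) {
  assert(CallSiteArgs >= WrittenParams);
  for (unsigned I = 0, N = CallSiteArgs - WrittenParams; I != N; ++I)
    Stk.discard(); // one slot per variadic argument, last-pushed first
}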
+ S.Current->popArgs(); +} bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { if (!Ptr.isExtern()) return true; - if (!S.checkingPotentialConstantExpression()) { - auto *VD = Ptr.getDeclDesc()->asValueDecl(); - const SourceInfo &Loc = S.Current->getSource(OpPC); - S.FFDiag(Loc, diag::note_constexpr_ltor_non_constexpr, 1) << VD; - S.Note(VD->getLocation(), diag::note_declared_at); + if (Ptr.isInitialized() || + (Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)) + return true; + + if (!S.checkingPotentialConstantExpression() && S.getLangOpts().CPlusPlus) { + const auto *VD = Ptr.getDeclDesc()->asValueDecl(); + diagnoseNonConstVariable(S, OpPC, VD); } return false; } @@ -201,8 +270,8 @@ bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK) { - const auto &Src = S.Current->getSource(OpPC); if (Ptr.isZero()) { + const auto &Src = S.Current->getSource(OpPC); if (Ptr.isField()) S.FFDiag(Src, diag::note_constexpr_null_subobject) << CSK_Field; @@ -213,6 +282,7 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, } if (!Ptr.isLive()) { + const auto &Src = S.Current->getSource(OpPC); bool IsTemp = Ptr.isTemporary(); S.FFDiag(Src, diag::note_constexpr_lifetime_ended, 1) << AK << !IsTemp; @@ -228,12 +298,54 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, return true; } +bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc) { + assert(Desc); + + auto IsConstType = [&S](const VarDecl *VD) -> bool { + if (VD->isConstexpr()) + return true; + + QualType T = VD->getType(); + if (S.getLangOpts().CPlusPlus && !S.getLangOpts().CPlusPlus11) + return (T->isSignedIntegerOrEnumerationType() || + T->isUnsignedIntegerOrEnumerationType()) && + T.isConstQualified(); + + if (T.isConstQualified()) + return true; + + if (const auto *RT = T->getAs<ReferenceType>()) + return RT->getPointeeType().isConstQualified(); + + if (const auto *PT = T->getAs<PointerType>()) + return PT->getPointeeType().isConstQualified(); + + return false; + }; + + if (const auto *D = Desc->asVarDecl(); + D && D->hasGlobalStorage() && D != S.EvaluatingDecl && !IsConstType(D)) { + diagnoseNonConstVariable(S, OpPC, D); + return S.inConstantContext(); + } + + return true; +} + +static bool CheckConstant(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { + if (Ptr.isIntegralPointer()) + return true; + return CheckConstant(S, OpPC, Ptr.getDeclDesc()); +} + bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr, CheckSubobjectKind CSK) { if (!Ptr.isZero()) return true; const SourceInfo &Loc = S.Current->getSource(OpPC); - S.FFDiag(Loc, diag::note_constexpr_null_subobject) << CSK; + S.FFDiag(Loc, diag::note_constexpr_null_subobject) + << CSK << S.Current->getRange(OpPC); + return false; } @@ -242,7 +354,8 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, if (!Ptr.isOnePastEnd()) return true; const SourceInfo &Loc = S.Current->getSource(OpPC); - S.FFDiag(Loc, diag::note_constexpr_access_past_end) << AK; + S.FFDiag(Loc, diag::note_constexpr_access_past_end) + << AK << S.Current->getRange(OpPC); return false; } @@ -251,16 +364,64 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, if (!Ptr.isElementPastEnd()) return true; const SourceInfo &Loc = S.Current->getSource(OpPC); - S.FFDiag(Loc, diag::note_constexpr_past_end_subobject) << CSK; + S.FFDiag(Loc, diag::note_constexpr_past_end_subobject) + << CSK << S.Current->getRange(OpPC); + return false; 
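The IsConstType rules in CheckConstant above track which variable reads the language itself permits inside a constant expression. A plain C++ illustration of the distinction being enforced (not interpreter code):

// Reads of constexpr and const-qualified globals are constant expressions;
// a read of a mutable global is not and must be diagnosed.
constexpr int CE = 1;
const int C = 2;
int Mutable = 3;

constexpr int OK = CE + C;        // both reads are constant expressions
// constexpr int Bad = Mutable;   // error: read of non-const variable 'Mutable'
static_assert(OK == 3, "constexpr/const globals are readable at compile time");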
+} + +bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + CheckSubobjectKind CSK) { + if (!Ptr.isOnePastEnd()) + return true; + + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_past_end_subobject) + << CSK << S.Current->getRange(OpPC); + return false; +} + +bool CheckDowncast(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + uint32_t Offset) { + uint32_t MinOffset = Ptr.getDeclDesc()->getMetadataSize(); + uint32_t PtrOffset = Ptr.getByteOffset(); + + // We subtract Offset from PtrOffset. The result must be at least + // MinOffset. + if (Offset < PtrOffset && (PtrOffset - Offset) >= MinOffset) + return true; + + const auto *E = cast<CastExpr>(S.Current->getExpr(OpPC)); + QualType TargetQT = E->getType()->getPointeeType(); + QualType MostDerivedQT = Ptr.getDeclPtr().getType(); + + S.CCEDiag(E, diag::note_constexpr_invalid_downcast) + << MostDerivedQT << TargetQT; + return false; } bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { assert(Ptr.isLive() && "Pointer is not live"); - if (!Ptr.isConst()) { + if (!Ptr.isConst() || Ptr.isMutable()) return true; + + // The This pointer is writable in constructors and destructors, + // even if isConst() returns true. + // TODO(perf): We could be hitting this code path quite a lot in complex + // constructors. Is there a better way to do this? + if (S.Current->getFunction()) { + for (const InterpFrame *Frame = S.Current; Frame; Frame = Frame->Caller) { + if (const Function *Func = Frame->getFunction(); + Func && (Func->isConstructor() || Func->isDestructor()) && + Ptr.block() == Frame->getThis().block()) { + return true; + } + } } + if (!Ptr.isBlockPointer()) + return false; + const QualType Ty = Ptr.getType(); const SourceInfo &Loc = S.Current->getSource(OpPC); S.FFDiag(Loc, diag::note_constexpr_modify_const_type) << Ty; @@ -269,9 +430,14 @@ bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { assert(Ptr.isLive() && "Pointer is not live"); - if (!Ptr.isMutable()) { + if (!Ptr.isMutable()) + return true; + + // In C++14 onwards, it is permitted to read a mutable member whose + // lifetime began within the evaluation. + if (S.getLangOpts().CPlusPlus14 && + Ptr.block()->getEvalID() == S.Ctx.getEvalID()) return true; - } const SourceInfo &Loc = S.Current->getSource(OpPC); const FieldDecl *Field = Ptr.getField(); @@ -280,27 +446,101 @@ bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { return false; } -bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { - if (!CheckLive(S, OpPC, Ptr, AK_Read)) +bool CheckVolatile(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + AccessKinds AK) { + assert(Ptr.isLive()); + + // FIXME: This check here might be kinda expensive. Maybe it would be better + // to have another field in InlineDescriptor for this? 
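The constructor/destructor exemption in CheckConst above exists because a const-qualified object is still writable through 'this' while it is being constructed. A plain C++14 example of the case that must keep working under the evaluator:

// Writes through 'this' inside a constexpr constructor are valid even though
// the completed object P is const.
struct Point {
  int X, Y;
  constexpr Point(int XV, int YV) : X(0), Y(0) {
    this->X = XV; // write through 'this' during construction
    this->Y = YV;
  }
};

constexpr Point P(1, 2);
static_assert(P.X == 1 && P.Y == 2, "initialized via this in the constructor");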
+ if (!Ptr.isBlockPointer()) + return true; + + QualType PtrType = Ptr.getType(); + if (!PtrType.isVolatileQualified()) + return true; + + const SourceInfo &Loc = S.Current->getSource(OpPC); + if (S.getLangOpts().CPlusPlus) + S.FFDiag(Loc, diag::note_constexpr_access_volatile_type) << AK << PtrType; + else + S.FFDiag(Loc); + return false; +} + +bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + AccessKinds AK) { + assert(Ptr.isLive()); + + if (Ptr.isInitialized()) + return true; + + if (const auto *VD = Ptr.getDeclDesc()->asVarDecl(); + VD && VD->hasGlobalStorage()) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + if (VD->getAnyInitializer()) { + S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD; + S.Note(VD->getLocation(), diag::note_declared_at); + } else { + diagnoseMissingInitializer(S, OpPC, VD); + } + return false; + } + + if (!S.checkingPotentialConstantExpression()) { + S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_uninit) + << AK << /*uninitialized=*/true << S.Current->getRange(OpPC); + } + return false; +} + +bool CheckGlobalInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { + if (Ptr.isInitialized()) + return true; + + assert(S.getLangOpts().CPlusPlus); + const auto *VD = cast<VarDecl>(Ptr.getDeclDesc()->asValueDecl()); + if ((!VD->hasConstantInitialization() && + VD->mightBeUsableInConstantExpressions(S.getCtx())) || + (S.getLangOpts().OpenCL && !S.getLangOpts().CPlusPlus11 && + !VD->hasICEInitializer(S.getCtx()))) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_var_init_non_constant, 1) << VD; + S.Note(VD->getLocation(), diag::note_declared_at); + } + return false; +} + +bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + AccessKinds AK) { + if (!CheckLive(S, OpPC, Ptr, AK)) + return false; + if (!CheckConstant(S, OpPC, Ptr)) + return false; + + if (!CheckDummy(S, OpPC, Ptr, AK)) return false; if (!CheckExtern(S, OpPC, Ptr)) return false; - if (!CheckRange(S, OpPC, Ptr, AK_Read)) + if (!CheckRange(S, OpPC, Ptr, AK)) return false; - if (!CheckInitialized(S, OpPC, Ptr, AK_Read)) + if (!CheckActive(S, OpPC, Ptr, AK)) return false; - if (!CheckActive(S, OpPC, Ptr, AK_Read)) + if (!CheckInitialized(S, OpPC, Ptr, AK)) return false; - if (!CheckTemporary(S, OpPC, Ptr, AK_Read)) + if (!CheckTemporary(S, OpPC, Ptr, AK)) return false; if (!CheckMutable(S, OpPC, Ptr)) return false; + if (!CheckVolatile(S, OpPC, Ptr, AK)) + return false; return true; } bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { if (!CheckLive(S, OpPC, Ptr, AK_Assign)) return false; + if (!CheckDummy(S, OpPC, Ptr, AK_Assign)) + return false; if (!CheckExtern(S, OpPC, Ptr)) return false; if (!CheckRange(S, OpPC, Ptr, AK_Assign)) @@ -315,10 +555,12 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { bool CheckInvoke(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { if (!CheckLive(S, OpPC, Ptr, AK_MemberCall)) return false; - if (!CheckExtern(S, OpPC, Ptr)) - return false; - if (!CheckRange(S, OpPC, Ptr, AK_MemberCall)) - return false; + if (!Ptr.isDummy()) { + if (!CheckExtern(S, OpPC, Ptr)) + return false; + if (!CheckRange(S, OpPC, Ptr, AK_MemberCall)) + return false; + } return true; } @@ -330,42 +572,77 @@ bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { return true; } -bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F) { +bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) { + + if 
(F->isVirtual() && !S.getLangOpts().CPlusPlus20) { + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.CCEDiag(Loc, diag::note_constexpr_virtual_call); + return false; + } + + if (F->isConstexpr() && F->hasBody() && + (F->getDecl()->isConstexpr() || F->getDecl()->hasAttr<MSConstexprAttr>())) + return true; + + // Implicitly constexpr. + if (F->isLambdaStaticInvoker()) + return true; + const SourceLocation &Loc = S.Current->getLocation(OpPC); + if (S.getLangOpts().CPlusPlus11) { + const FunctionDecl *DiagDecl = F->getDecl(); - if (F->isVirtual()) { - if (!S.getLangOpts().CPlusPlus20) { - S.CCEDiag(Loc, diag::note_constexpr_virtual_call); + // Invalid decls have been diagnosed before. + if (DiagDecl->isInvalidDecl()) return false; - } - } - if (!F->isConstexpr()) { - if (S.getLangOpts().CPlusPlus11) { - const FunctionDecl *DiagDecl = F->getDecl(); - - // If this function is not constexpr because it is an inherited - // non-constexpr constructor, diagnose that directly. - auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl); - if (CD && CD->isInheritingConstructor()) { - auto *Inherited = CD->getInheritedConstructor().getConstructor(); - if (!Inherited->isConstexpr()) - DiagDecl = CD = Inherited; - } + // If this function is not constexpr because it is an inherited + // non-constexpr constructor, diagnose that directly. + const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl); + if (CD && CD->isInheritingConstructor()) { + const auto *Inherited = CD->getInheritedConstructor().getConstructor(); + if (!Inherited->isConstexpr()) + DiagDecl = CD = Inherited; + } - // FIXME: If DiagDecl is an implicitly-declared special member function - // or an inheriting constructor, we should be much more explicit about why - // it's not constexpr. - if (CD && CD->isInheritingConstructor()) - S.FFDiag(Loc, diag::note_constexpr_invalid_inhctor, 1) + // FIXME: If DiagDecl is an implicitly-declared special member function + // or an inheriting constructor, we should be much more explicit about why + // it's not constexpr. + if (CD && CD->isInheritingConstructor()) { + S.FFDiag(Loc, diag::note_constexpr_invalid_inhctor, 1) << CD->getInheritedConstructor().getConstructor()->getParent(); - else - S.FFDiag(Loc, diag::note_constexpr_invalid_function, 1) - << DiagDecl->isConstexpr() << (bool)CD << DiagDecl; S.Note(DiagDecl->getLocation(), diag::note_declared_at); } else { - S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr); + // Don't emit anything if the function isn't defined and we're checking + // for a constant expression. It might be defined at the point we're + // actually calling it. + bool IsExtern = DiagDecl->getStorageClass() == SC_Extern; + if (!DiagDecl->isDefined() && !IsExtern && DiagDecl->isConstexpr() && + S.checkingPotentialConstantExpression()) + return false; + + // If the declaration is defined, declared 'constexpr' _and_ has a body, + // the below diagnostic doesn't add anything useful. 
+ if (DiagDecl->isDefined() && DiagDecl->isConstexpr() && + DiagDecl->hasBody()) + return false; + + S.FFDiag(Loc, diag::note_constexpr_invalid_function, 1) + << DiagDecl->isConstexpr() << (bool)CD << DiagDecl; + S.Note(DiagDecl->getLocation(), diag::note_declared_at); } + } else { + S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr); + } + + return false; +} + +bool CheckCallDepth(InterpState &S, CodePtr OpPC) { + if ((S.Current->getDepth() + 1) > S.getLangOpts().ConstexprCallDepth) { + S.FFDiag(S.Current->getSource(OpPC), + diag::note_constexpr_depth_limit_exceeded) + << S.getLangOpts().ConstexprCallDepth; return false; } @@ -379,7 +656,7 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) { const SourceInfo &Loc = S.Current->getSource(OpPC); bool IsImplicit = false; - if (auto *E = dyn_cast_or_null<CXXThisExpr>(Loc.asExpr())) + if (const auto *E = dyn_cast_if_present<CXXThisExpr>(Loc.asExpr())) IsImplicit = E->isImplicit(); if (S.getLangOpts().CPlusPlus11) @@ -391,16 +668,280 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) { } bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD) { - if (!MD->isPure()) + if (!MD->isPureVirtual()) return true; const SourceInfo &E = S.Current->getSource(OpPC); S.FFDiag(E, diag::note_constexpr_pure_virtual_call, 1) << MD; S.Note(MD->getLocation(), diag::note_declared_at); return false; } + +bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result, + APFloat::opStatus Status) { + const SourceInfo &E = S.Current->getSource(OpPC); + + // [expr.pre]p4: + // If during the evaluation of an expression, the result is not + // mathematically defined [...], the behavior is undefined. + // FIXME: C++ rules require us to not conform to IEEE 754 here. + if (Result.isNan()) { + S.CCEDiag(E, diag::note_constexpr_float_arithmetic) + << /*NaN=*/true << S.Current->getRange(OpPC); + return S.noteUndefinedBehavior(); + } + + // In a constant context, assume that any dynamic rounding mode or FP + // exception state matches the default floating-point environment. + if (S.inConstantContext()) + return true; + + FPOptions FPO = E.asExpr()->getFPFeaturesInEffect(S.Ctx.getLangOpts()); + + if ((Status & APFloat::opInexact) && + FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) { + // Inexact result means that it depends on rounding mode. If the requested + // mode is dynamic, the evaluation cannot be made in compile time. + S.FFDiag(E, diag::note_constexpr_dynamic_rounding); + return false; + } + + if ((Status != APFloat::opOK) && + (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic || + FPO.getExceptionMode() != LangOptions::FPE_Ignore || + FPO.getAllowFEnvAccess())) { + S.FFDiag(E, diag::note_constexpr_float_arithmetic_strict); + return false; + } + + if ((Status & APFloat::opStatus::opInvalidOp) && + FPO.getExceptionMode() != LangOptions::FPE_Ignore) { + // There is no usefully definable result. 
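CheckFloatResult above keys off the llvm::APFloat::opStatus returned by the floating-point helpers and compares it against the expression's FPOptions. A minimal sketch of obtaining such a status (assumes LLVM's Support library is available; not part of the patch):

// 1.0f / 3.0f is not exactly representable, so divide() reports opInexact;
// CheckFloatResult rejects that only under dynamic rounding or strict FP modes.
#include "llvm/ADT/APFloat.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  using namespace llvm;
  APFloat A(1.0f), B(3.0f);
  APFloat::opStatus St = A.divide(B, APFloat::rmNearestTiesToEven);
  outs() << "inexact: " << bool(St & APFloat::opInexact) << '\n';
}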
+ S.FFDiag(E); + return false; + } + + return true; +} + +bool CheckDynamicMemoryAllocation(InterpState &S, CodePtr OpPC) { + if (S.getLangOpts().CPlusPlus20) + return true; + + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_new); + return true; +} + +bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, bool NewWasArray, + bool DeleteIsArray, const Descriptor *D, + const Expr *NewExpr) { + if (NewWasArray == DeleteIsArray) + return true; + + QualType TypeToDiagnose; + // We need to shuffle things around a bit here to get a better diagnostic, + // because the expression we allocated the block for was of type int*, + // but we want to get the array size right. + if (D->isArray()) { + QualType ElemQT = D->getType()->getPointeeType(); + TypeToDiagnose = S.getCtx().getConstantArrayType( + ElemQT, APInt(64, static_cast<uint64_t>(D->getNumElems()), false), + nullptr, ArraySizeModifier::Normal, 0); + } else + TypeToDiagnose = D->getType()->getPointeeType(); + + const SourceInfo &E = S.Current->getSource(OpPC); + S.FFDiag(E, diag::note_constexpr_new_delete_mismatch) + << DeleteIsArray << 0 << TypeToDiagnose; + S.Note(NewExpr->getExprLoc(), diag::note_constexpr_dynamic_alloc_here) + << NewExpr->getSourceRange(); + return false; +} + +bool CheckDeleteSource(InterpState &S, CodePtr OpPC, const Expr *Source, + const Pointer &Ptr) { + if (Source && isa<CXXNewExpr>(Source)) + return true; + + // Whatever this is, we didn't heap allocate it. + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_delete_not_heap_alloc) + << Ptr.toDiagnosticString(S.getCtx()); + + if (Ptr.isTemporary()) + S.Note(Ptr.getDeclLoc(), diag::note_constexpr_temporary_here); + else + S.Note(Ptr.getDeclLoc(), diag::note_declared_at); + return false; +} + +/// We aleady know the given DeclRefExpr is invalid for some reason, +/// now figure out why and print appropriate diagnostics. +bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) { + const ValueDecl *D = DR->getDecl(); + return diagnoseUnknownDecl(S, OpPC, D); +} + +bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + AccessKinds AK) { + if (!Ptr.isDummy()) + return true; + + const Descriptor *Desc = Ptr.getDeclDesc(); + const ValueDecl *D = Desc->asValueDecl(); + if (!D) + return false; + + if (AK == AK_Read || AK == AK_Increment || AK == AK_Decrement) + return diagnoseUnknownDecl(S, OpPC, D); + + assert(AK == AK_Assign); + if (S.getLangOpts().CPlusPlus11) { + const SourceInfo &E = S.Current->getSource(OpPC); + S.FFDiag(E, diag::note_constexpr_modify_global); + } + return false; +} + +bool CheckNonNullArgs(InterpState &S, CodePtr OpPC, const Function *F, + const CallExpr *CE, unsigned ArgSize) { + auto Args = llvm::ArrayRef(CE->getArgs(), CE->getNumArgs()); + auto NonNullArgs = collectNonNullArgs(F->getDecl(), Args); + unsigned Offset = 0; + unsigned Index = 0; + for (const Expr *Arg : Args) { + if (NonNullArgs[Index] && Arg->getType()->isPointerType()) { + const Pointer &ArgPtr = S.Stk.peek<Pointer>(ArgSize - Offset); + if (ArgPtr.isZero()) { + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.CCEDiag(Loc, diag::note_non_null_attribute_failed); + return false; + } + } + + Offset += align(primSize(S.Ctx.classify(Arg).value_or(PT_Ptr))); + ++Index; + } + return true; +} + +// FIXME: This is similar to code we already have in Compiler.cpp. +// I think it makes sense to instead add the field and base destruction stuff +// to the destructor Function itself. 
Then destroying a record would really +// _just_ be calling its destructor. That would also help with the diagnostic +// difference when the destructor or a field/base fails. +static bool runRecordDestructor(InterpState &S, CodePtr OpPC, + const Pointer &BasePtr, + const Descriptor *Desc) { + assert(Desc->isRecord()); + const Record *R = Desc->ElemRecord; + assert(R); + + // Fields. + for (const Record::Field &Field : llvm::reverse(R->fields())) { + const Descriptor *D = Field.Desc; + if (D->isRecord()) { + if (!runRecordDestructor(S, OpPC, BasePtr.atField(Field.Offset), D)) + return false; + } else if (D->isCompositeArray()) { + const Descriptor *ElemDesc = Desc->ElemDesc; + assert(ElemDesc->isRecord()); + for (unsigned I = 0; I != Desc->getNumElems(); ++I) { + if (!runRecordDestructor(S, OpPC, BasePtr.atIndex(I).narrow(), + ElemDesc)) + return false; + } + } + } + + // Destructor of this record. + if (const CXXDestructorDecl *Dtor = R->getDestructor(); + Dtor && !Dtor->isTrivial()) { + const Function *DtorFunc = S.getContext().getOrCreateFunction(Dtor); + if (!DtorFunc) + return false; + + S.Stk.push<Pointer>(BasePtr); + if (!Call(S, OpPC, DtorFunc, 0)) + return false; + } + + // Bases. + for (const Record::Base &Base : llvm::reverse(R->bases())) { + if (!runRecordDestructor(S, OpPC, BasePtr.atField(Base.Offset), Base.Desc)) + return false; + } + + return true; +} + +bool RunDestructors(InterpState &S, CodePtr OpPC, const Block *B) { + assert(B); + const Descriptor *Desc = B->getDescriptor(); + + if (Desc->isPrimitive() || Desc->isPrimitiveArray()) + return true; + + assert(Desc->isRecord() || Desc->isCompositeArray()); + + if (Desc->isCompositeArray()) { + const Descriptor *ElemDesc = Desc->ElemDesc; + assert(ElemDesc->isRecord()); + + Pointer RP(const_cast<Block *>(B)); + for (unsigned I = 0; I != Desc->getNumElems(); ++I) { + if (!runRecordDestructor(S, OpPC, RP.atIndex(I).narrow(), ElemDesc)) + return false; + } + return true; + } + + assert(Desc->isRecord()); + return runRecordDestructor(S, OpPC, Pointer(const_cast<Block *>(B)), Desc); +} + +void diagnoseEnumValue(InterpState &S, CodePtr OpPC, const EnumDecl *ED, + const APSInt &Value) { + llvm::APInt Min; + llvm::APInt Max; + + if (S.EvaluatingDecl && !S.EvaluatingDecl->isConstexpr()) + return; + + ED->getValueRange(Max, Min); + --Max; + + if (ED->getNumNegativeBits() && + (Max.slt(Value.getSExtValue()) || Min.sgt(Value.getSExtValue()))) { + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.report(Loc, diag::warn_constexpr_unscoped_enum_out_of_range) + << llvm::toString(Value, 10) << Min.getSExtValue() << Max.getSExtValue() + << ED; + } else if (!ED->getNumNegativeBits() && Max.ult(Value.getZExtValue())) { + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.report(Loc, diag::warn_constexpr_unscoped_enum_out_of_range) + << llvm::toString(Value, 10) << Min.getZExtValue() << Max.getZExtValue() + << ED; + } +} + +// https://github.com/llvm/llvm-project/issues/102513 +#if defined(_WIN32) && !defined(__clang__) && !defined(NDEBUG) +#pragma optimize("", off) +#endif bool Interpret(InterpState &S, APValue &Result) { + // The current stack frame when we started Interpret(). + // This is being used by the ops to determine wheter + // to return from this function and thus terminate + // interpretation. + const InterpFrame *StartFrame = S.Current; + assert(!S.Current->isRoot()); CodePtr PC = S.Current->getPC(); + // Empty program. 
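Interpret() below is the usual fetch-and-dispatch loop: read an opcode, advance the code pointer, execute, and stop once a return opcode unwinds the last frame. A toy, self-contained sketch of that shape with hypothetical opcodes (the real dispatch is generated from Opcodes.td):

#include <cstdint>
#include <iostream>
#include <vector>

enum Opcode : uint8_t { OP_PUSH1, OP_ADD, OP_RET }; // toy opcode set

static int interpret(const std::vector<uint8_t> &Code) {
  std::vector<int> Stk;
  size_t PC = 0;
  for (;;) {
    switch (static_cast<Opcode>(Code[PC++])) {   // fetch and advance
    case OP_PUSH1: Stk.push_back(1); break;
    case OP_ADD: { int R = Stk.back(); Stk.pop_back(); Stk.back() += R; break; }
    case OP_RET: return Stk.back();              // terminate interpretation
    }
  }
}

int main() { std::cout << interpret({OP_PUSH1, OP_PUSH1, OP_ADD, OP_RET}) << '\n'; } // 2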
+ if (!PC) + return true; + for (;;) { auto Op = PC.read<Opcode>(); CodePtr OpPC = PC; @@ -412,6 +953,10 @@ bool Interpret(InterpState &S, APValue &Result) { } } } +// https://github.com/llvm/llvm-project/issues/102513 +#if defined(_WIN32) && !defined(__clang__) && !defined(NDEBUG) +#pragma optimize("", on) +#endif } // namespace interp } // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h index e2f7bf0dc26a..253a433e7340 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h @@ -13,33 +13,35 @@ #ifndef LLVM_CLANG_AST_INTERP_INTERP_H #define LLVM_CLANG_AST_INTERP_INTERP_H -#include <limits> -#include <vector> +#include "../ExprConstShared.h" +#include "Boolean.h" +#include "DynamicAllocator.h" +#include "Floating.h" #include "Function.h" +#include "FunctionPointer.h" #include "InterpFrame.h" #include "InterpStack.h" #include "InterpState.h" +#include "MemberPointer.h" #include "Opcode.h" #include "PrimType.h" #include "Program.h" #include "State.h" #include "clang/AST/ASTContext.h" -#include "clang/AST/ASTDiagnostic.h" -#include "clang/AST/CXXInheritance.h" #include "clang/AST/Expr.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APSInt.h" -#include "llvm/Support/Endian.h" +#include <type_traits> namespace clang { namespace interp { -using APInt = llvm::APInt; using APSInt = llvm::APSInt; -/// Convers a value to an APValue. -template <typename T> bool ReturnValue(const T &V, APValue &R) { - R = V.toAPValue(); +/// Convert a value to an APValue. +template <typename T> +bool ReturnValue(const InterpState &S, const T &V, APValue &R) { + R = V.toAPValue(S.getCtx()); return true; } @@ -49,9 +51,14 @@ bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr); /// Checks if the array is offsetable. bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr); -/// Checks if a pointer is live and accesible. +/// Checks if a pointer is live and accessible. bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK); + +/// Checks if a pointer is a dummy pointer. +bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + AccessKinds AK); + /// Checks if a pointer is null. bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr, CheckSubobjectKind CSK); @@ -64,14 +71,32 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, CheckSubobjectKind CSK); +/// Checks if Ptr is a one-past-the-end pointer. +bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + CheckSubobjectKind CSK); + +/// Checks if the dowcast using the given offset is possible with the given +/// pointer. +bool CheckDowncast(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + uint32_t Offset); + /// Checks if a pointer points to const storage. bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr); +/// Checks if the Descriptor is of a constexpr or const global variable. +bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc); + /// Checks if a pointer points to a mutable field. bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr); /// Checks if a value can be loaded from a block. 
-bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr); +bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + AccessKinds AK = AK_Read); + +bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + AccessKinds AK); +/// Check if a global variable is initialized. +bool CheckGlobalInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr); /// Checks if a value can be stored in a block. bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr); @@ -83,7 +108,11 @@ bool CheckInvoke(InterpState &S, CodePtr OpPC, const Pointer &Ptr); bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr); /// Checks if a method can be called. -bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F); +bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F); + +/// Checks if calling the currently active function would exceed +/// the allowed call depth. +bool CheckCallDepth(InterpState &S, CodePtr OpPC); /// Checks the 'this' pointer. bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This); @@ -91,7 +120,208 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This); /// Checks if a method is pure virtual. bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD); -template <typename T> inline bool IsTrue(const T &V) { return !V.isZero(); } +/// Checks if all the arguments annotated as 'nonnull' are in fact not null. +bool CheckNonNullArgs(InterpState &S, CodePtr OpPC, const Function *F, + const CallExpr *CE, unsigned ArgSize); + +/// Checks if dynamic memory allocation is available in the current +/// language mode. +bool CheckDynamicMemoryAllocation(InterpState &S, CodePtr OpPC); + +/// Diagnose mismatched new[]/delete or new/delete[] pairs. +bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, bool NewWasArray, + bool DeleteIsArray, const Descriptor *D, + const Expr *NewExpr); + +/// Check the source of the pointer passed to delete/delete[] has actually +/// been heap allocated by us. +bool CheckDeleteSource(InterpState &S, CodePtr OpPC, const Expr *Source, + const Pointer &Ptr); + +/// Sets the given integral value to the pointer, which is of +/// a std::{weak,partial,strong}_ordering type. +bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, + const Pointer &Ptr, const APSInt &IntValue); + +/// Copy the contents of Src into Dest. +bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest); + +/// Checks if the shift operation is legal. +template <typename LT, typename RT> +bool CheckShift(InterpState &S, CodePtr OpPC, const LT &LHS, const RT &RHS, + unsigned Bits) { + if (RHS.isNegative()) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt(); + if (!S.noteUndefinedBehavior()) + return false; + } + + // C++11 [expr.shift]p1: Shift width must be less than the bit width of + // the shifted type. + if (Bits > 1 && RHS >= RT::from(Bits, RHS.bitWidth())) { + const Expr *E = S.Current->getExpr(OpPC); + const APSInt Val = RHS.toAPSInt(); + QualType Ty = E->getType(); + S.CCEDiag(E, diag::note_constexpr_large_shift) << Val << Ty << Bits; + if (!S.noteUndefinedBehavior()) + return false; + } + + if (LHS.isSigned() && !S.getLangOpts().CPlusPlus20) { + const Expr *E = S.Current->getExpr(OpPC); + // C++11 [expr.shift]p2: A signed left shift must have a non-negative + // operand, and must not overflow the corresponding unsigned type. 
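The "discards bits" test in CheckShift below is the classic leading-zero check: a left shift of a non-negative value overflows the corresponding unsigned type exactly when the shift amount exceeds the number of leading zero bits. A standalone C++20 sketch using std::countl_zero in place of the Integral wrapper:

#include <bit>
#include <cstdint>
#include <iostream>

// True when shifting LHS left by RHS would shift set bits out of the
// 32-bit unsigned type (assumes a non-negative original operand).
static bool lshiftDiscardsBits(uint32_t LHS, unsigned RHS) {
  return std::countl_zero(LHS) < static_cast<int>(RHS);
}

int main() {
  std::cout << lshiftDiscardsBits(1u, 31) << '\n'; // 0: 1 << 31 still fits
  std::cout << lshiftDiscardsBits(2u, 31) << '\n'; // 1: the high bit is lost
}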
+ if (LHS.isNegative()) { + S.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS.toAPSInt(); + if (!S.noteUndefinedBehavior()) + return false; + } else if (LHS.toUnsigned().countLeadingZeros() < + static_cast<unsigned>(RHS)) { + S.CCEDiag(E, diag::note_constexpr_lshift_discards); + if (!S.noteUndefinedBehavior()) + return false; + } + } + + // C++2a [expr.shift]p2: [P0907R4]: + // E1 << E2 is the unique value congruent to + // E1 x 2^E2 module 2^N. + return true; +} + +/// Checks if Div/Rem operation on LHS and RHS is valid. +template <typename T> +bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) { + if (RHS.isZero()) { + const auto *Op = cast<BinaryOperator>(S.Current->getExpr(OpPC)); + if constexpr (std::is_same_v<T, Floating>) { + S.CCEDiag(Op, diag::note_expr_divide_by_zero) + << Op->getRHS()->getSourceRange(); + return true; + } + + S.FFDiag(Op, diag::note_expr_divide_by_zero) + << Op->getRHS()->getSourceRange(); + return false; + } + + if (LHS.isSigned() && LHS.isMin() && RHS.isNegative() && RHS.isMinusOne()) { + APSInt LHSInt = LHS.toAPSInt(); + SmallString<32> Trunc; + (-LHSInt.extend(LHSInt.getBitWidth() + 1)).toString(Trunc, 10); + const SourceInfo &Loc = S.Current->getSource(OpPC); + const Expr *E = S.Current->getExpr(OpPC); + S.CCEDiag(Loc, diag::note_constexpr_overflow) << Trunc << E->getType(); + return false; + } + return true; +} + +template <typename SizeT> +bool CheckArraySize(InterpState &S, CodePtr OpPC, SizeT *NumElements, + unsigned ElemSize, bool IsNoThrow) { + // FIXME: Both the SizeT::from() as well as the + // NumElements.toAPSInt() in this function are rather expensive. + + // FIXME: GH63562 + // APValue stores array extents as unsigned, + // so anything that is greater that unsigned would overflow when + // constructing the array, we catch this here. + SizeT MaxElements = SizeT::from(Descriptor::MaxArrayElemBytes / ElemSize); + if (NumElements->toAPSInt().getActiveBits() > + ConstantArrayType::getMaxSizeBits(S.getCtx()) || + *NumElements > MaxElements) { + if (!IsNoThrow) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_new_too_large) + << NumElements->toDiagnosticString(S.getCtx()); + } + return false; + } + return true; +} + +/// Checks if the result of a floating-point operation is valid +/// in the current context. +bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result, + APFloat::opStatus Status); + +/// Checks why the given DeclRefExpr is invalid. +bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR); + +/// Interpreter entry point. +bool Interpret(InterpState &S, APValue &Result); + +/// Interpret a builtin function. +bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F, + const CallExpr *Call); + +/// Interpret an offsetof operation. +bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, + llvm::ArrayRef<int64_t> ArrayIndices, int64_t &Result); + +inline bool Invalid(InterpState &S, CodePtr OpPC); + +enum class ArithOp { Add, Sub }; + +//===----------------------------------------------------------------------===// +// Returning values +//===----------------------------------------------------------------------===// + +void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC); + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Ret(InterpState &S, CodePtr &PC, APValue &Result) { + const T &Ret = S.Stk.pop<T>(); + + // Make sure returned pointers are live. 
We might be trying to return a + // pointer or reference to a local variable. + // Just return false, since a diagnostic has already been emitted in Sema. + if constexpr (std::is_same_v<T, Pointer>) { + // FIXME: We could be calling isLive() here, but the emitted diagnostics + // seem a little weird, at least if the returned expression is of + // pointer type. + // Null pointers are considered live here. + if (!Ret.isZero() && !Ret.isLive()) + return false; + } + + assert(S.Current); + assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame"); + if (!S.checkingPotentialConstantExpression() || S.Current->Caller) + cleanupAfterFunctionCall(S, PC); + + if (InterpFrame *Caller = S.Current->Caller) { + PC = S.Current->getRetPC(); + delete S.Current; + S.Current = Caller; + S.Stk.push<T>(Ret); + } else { + delete S.Current; + S.Current = nullptr; + if (!ReturnValue<T>(S, Ret, Result)) + return false; + } + return true; +} + +inline bool RetVoid(InterpState &S, CodePtr &PC, APValue &Result) { + assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame"); + + if (!S.checkingPotentialConstantExpression() || S.Current->Caller) + cleanupAfterFunctionCall(S, PC); + + if (InterpFrame *Caller = S.Current->Caller) { + PC = S.Current->getRetPC(); + delete S.Current; + S.Current = Caller; + } else { + delete S.Current; + S.Current = nullptr; + } + return true; +} //===----------------------------------------------------------------------===// // Add, Sub, Mul @@ -119,14 +349,22 @@ bool AddSubMulHelper(InterpState &S, CodePtr OpPC, unsigned Bits, const T &LHS, QualType Type = E->getType(); if (S.checkingForUndefinedBehavior()) { SmallString<32> Trunc; - Value.trunc(Result.bitWidth()).toString(Trunc, 10); + Value.trunc(Result.bitWidth()) + .toString(Trunc, 10, Result.isSigned(), /*formatAsCLiteral=*/false, + /*UpperCase=*/true, /*InsertSeparators=*/true); auto Loc = E->getExprLoc(); - S.report(Loc, diag::warn_integer_constant_overflow) << Trunc << Type; - return true; - } else { - S.CCEDiag(E, diag::note_constexpr_overflow) << Value << Type; - return S.noteUndefinedBehavior(); + S.report(Loc, diag::warn_integer_constant_overflow) + << Trunc << Type << E->getSourceRange(); } + + S.CCEDiag(E, diag::note_constexpr_overflow) << Value << Type; + + if (!S.noteUndefinedBehavior()) { + S.Stk.pop<T>(); + return false; + } + + return true; } template <PrimType Name, class T = typename PrimConv<Name>::T> @@ -137,6 +375,16 @@ bool Add(InterpState &S, CodePtr OpPC) { return AddSubMulHelper<T, T::add, std::plus>(S, OpPC, Bits, LHS, RHS); } +inline bool Addf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const Floating &RHS = S.Stk.pop<Floating>(); + const Floating &LHS = S.Stk.pop<Floating>(); + + Floating Result; + auto Status = Floating::add(LHS, RHS, RM, &Result); + S.Stk.push<Floating>(Result); + return CheckFloatResult(S, OpPC, Result, Status); +} + template <PrimType Name, class T = typename PrimConv<Name>::T> bool Sub(InterpState &S, CodePtr OpPC) { const T &RHS = S.Stk.pop<T>(); @@ -145,6 +393,16 @@ bool Sub(InterpState &S, CodePtr OpPC) { return AddSubMulHelper<T, T::sub, std::minus>(S, OpPC, Bits, LHS, RHS); } +inline bool Subf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const Floating &RHS = S.Stk.pop<Floating>(); + const Floating &LHS = S.Stk.pop<Floating>(); + + Floating Result; + auto Status = Floating::sub(LHS, RHS, RM, &Result); + S.Stk.push<Floating>(Result); + return CheckFloatResult(S, OpPC, Result, Status); +} + template <PrimType Name, class T = typename 
PrimConv<Name>::T> bool Mul(InterpState &S, CodePtr OpPC) { const T &RHS = S.Stk.pop<T>(); @@ -153,6 +411,479 @@ bool Mul(InterpState &S, CodePtr OpPC) { return AddSubMulHelper<T, T::mul, std::multiplies>(S, OpPC, Bits, LHS, RHS); } +inline bool Mulf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const Floating &RHS = S.Stk.pop<Floating>(); + const Floating &LHS = S.Stk.pop<Floating>(); + + Floating Result; + auto Status = Floating::mul(LHS, RHS, RM, &Result); + S.Stk.push<Floating>(Result); + return CheckFloatResult(S, OpPC, Result, Status); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool Mulc(InterpState &S, CodePtr OpPC) { + const Pointer &RHS = S.Stk.pop<Pointer>(); + const Pointer &LHS = S.Stk.pop<Pointer>(); + const Pointer &Result = S.Stk.peek<Pointer>(); + + if constexpr (std::is_same_v<T, Floating>) { + APFloat A = LHS.atIndex(0).deref<Floating>().getAPFloat(); + APFloat B = LHS.atIndex(1).deref<Floating>().getAPFloat(); + APFloat C = RHS.atIndex(0).deref<Floating>().getAPFloat(); + APFloat D = RHS.atIndex(1).deref<Floating>().getAPFloat(); + + APFloat ResR(A.getSemantics()); + APFloat ResI(A.getSemantics()); + HandleComplexComplexMul(A, B, C, D, ResR, ResI); + + // Copy into the result. + Result.atIndex(0).deref<Floating>() = Floating(ResR); + Result.atIndex(0).initialize(); + Result.atIndex(1).deref<Floating>() = Floating(ResI); + Result.atIndex(1).initialize(); + Result.initialize(); + } else { + // Integer element type. + const T &LHSR = LHS.atIndex(0).deref<T>(); + const T &LHSI = LHS.atIndex(1).deref<T>(); + const T &RHSR = RHS.atIndex(0).deref<T>(); + const T &RHSI = RHS.atIndex(1).deref<T>(); + unsigned Bits = LHSR.bitWidth(); + + // real(Result) = (real(LHS) * real(RHS)) - (imag(LHS) * imag(RHS)) + T A; + if (T::mul(LHSR, RHSR, Bits, &A)) + return false; + T B; + if (T::mul(LHSI, RHSI, Bits, &B)) + return false; + if (T::sub(A, B, Bits, &Result.atIndex(0).deref<T>())) + return false; + Result.atIndex(0).initialize(); + + // imag(Result) = (real(LHS) * imag(RHS)) + (imag(LHS) * real(RHS)) + if (T::mul(LHSR, RHSI, Bits, &A)) + return false; + if (T::mul(LHSI, RHSR, Bits, &B)) + return false; + if (T::add(A, B, Bits, &Result.atIndex(1).deref<T>())) + return false; + Result.atIndex(1).initialize(); + Result.initialize(); + } + + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool Divc(InterpState &S, CodePtr OpPC) { + const Pointer &RHS = S.Stk.pop<Pointer>(); + const Pointer &LHS = S.Stk.pop<Pointer>(); + const Pointer &Result = S.Stk.peek<Pointer>(); + + if constexpr (std::is_same_v<T, Floating>) { + APFloat A = LHS.atIndex(0).deref<Floating>().getAPFloat(); + APFloat B = LHS.atIndex(1).deref<Floating>().getAPFloat(); + APFloat C = RHS.atIndex(0).deref<Floating>().getAPFloat(); + APFloat D = RHS.atIndex(1).deref<Floating>().getAPFloat(); + + APFloat ResR(A.getSemantics()); + APFloat ResI(A.getSemantics()); + HandleComplexComplexDiv(A, B, C, D, ResR, ResI); + + // Copy into the result. + Result.atIndex(0).deref<Floating>() = Floating(ResR); + Result.atIndex(0).initialize(); + Result.atIndex(1).deref<Floating>() = Floating(ResI); + Result.atIndex(1).initialize(); + Result.initialize(); + } else { + // Integer element type. 
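For reference (illustration only, not part of the patch), the integer branch below evaluates the usual complex-division identity:

  (a + b*i) / (c + d*i) = ((a*c + b*d) + (b*c - a*d)*i) / (c*c + d*d)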
+ const T &LHSR = LHS.atIndex(0).deref<T>(); + const T &LHSI = LHS.atIndex(1).deref<T>(); + const T &RHSR = RHS.atIndex(0).deref<T>(); + const T &RHSI = RHS.atIndex(1).deref<T>(); + unsigned Bits = LHSR.bitWidth(); + const T Zero = T::from(0, Bits); + + if (Compare(RHSR, Zero) == ComparisonCategoryResult::Equal && + Compare(RHSI, Zero) == ComparisonCategoryResult::Equal) { + const SourceInfo &E = S.Current->getSource(OpPC); + S.FFDiag(E, diag::note_expr_divide_by_zero); + return false; + } + + // Den = real(RHS)² + imag(RHS)² + T A, B; + if (T::mul(RHSR, RHSR, Bits, &A) || T::mul(RHSI, RHSI, Bits, &B)) + return false; + T Den; + if (T::add(A, B, Bits, &Den)) + return false; + + // real(Result) = ((real(LHS) * real(RHS)) + (imag(LHS) * imag(RHS))) / Den + T &ResultR = Result.atIndex(0).deref<T>(); + T &ResultI = Result.atIndex(1).deref<T>(); + + if (T::mul(LHSR, RHSR, Bits, &A) || T::mul(LHSI, RHSI, Bits, &B)) + return false; + if (T::add(A, B, Bits, &ResultR)) + return false; + if (T::div(ResultR, Den, Bits, &ResultR)) + return false; + Result.atIndex(0).initialize(); + + // imag(Result) = ((imag(LHS) * real(RHS)) - (real(LHS) * imag(RHS))) / Den + if (T::mul(LHSI, RHSR, Bits, &A) || T::mul(LHSR, RHSI, Bits, &B)) + return false; + if (T::sub(A, B, Bits, &ResultI)) + return false; + if (T::div(ResultI, Den, Bits, &ResultI)) + return false; + Result.atIndex(1).initialize(); + Result.initialize(); + } + + return true; +} + +/// 1) Pops the RHS from the stack. +/// 2) Pops the LHS from the stack. +/// 3) Pushes 'LHS & RHS' on the stack +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool BitAnd(InterpState &S, CodePtr OpPC) { + const T &RHS = S.Stk.pop<T>(); + const T &LHS = S.Stk.pop<T>(); + + unsigned Bits = RHS.bitWidth(); + T Result; + if (!T::bitAnd(LHS, RHS, Bits, &Result)) { + S.Stk.push<T>(Result); + return true; + } + return false; +} + +/// 1) Pops the RHS from the stack. +/// 2) Pops the LHS from the stack. +/// 3) Pushes 'LHS | RHS' on the stack +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool BitOr(InterpState &S, CodePtr OpPC) { + const T &RHS = S.Stk.pop<T>(); + const T &LHS = S.Stk.pop<T>(); + + unsigned Bits = RHS.bitWidth(); + T Result; + if (!T::bitOr(LHS, RHS, Bits, &Result)) { + S.Stk.push<T>(Result); + return true; + } + return false; +} + +/// 1) Pops the RHS from the stack. +/// 2) Pops the LHS from the stack. +/// 3) Pushes 'LHS ^ RHS' on the stack +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool BitXor(InterpState &S, CodePtr OpPC) { + const T &RHS = S.Stk.pop<T>(); + const T &LHS = S.Stk.pop<T>(); + + unsigned Bits = RHS.bitWidth(); + T Result; + if (!T::bitXor(LHS, RHS, Bits, &Result)) { + S.Stk.push<T>(Result); + return true; + } + return false; +} + +/// 1) Pops the RHS from the stack. +/// 2) Pops the LHS from the stack. +/// 3) Pushes 'LHS % RHS' on the stack (the remainder of dividing LHS by RHS). +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Rem(InterpState &S, CodePtr OpPC) { + const T &RHS = S.Stk.pop<T>(); + const T &LHS = S.Stk.pop<T>(); + + if (!CheckDivRem(S, OpPC, LHS, RHS)) + return false; + + const unsigned Bits = RHS.bitWidth() * 2; + T Result; + if (!T::rem(LHS, RHS, Bits, &Result)) { + S.Stk.push<T>(Result); + return true; + } + return false; +} + +/// 1) Pops the RHS from the stack. +/// 2) Pops the LHS from the stack. 
+/// 3) Pushes 'LHS / RHS' on the stack +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Div(InterpState &S, CodePtr OpPC) { + const T &RHS = S.Stk.pop<T>(); + const T &LHS = S.Stk.pop<T>(); + + if (!CheckDivRem(S, OpPC, LHS, RHS)) + return false; + + const unsigned Bits = RHS.bitWidth() * 2; + T Result; + if (!T::div(LHS, RHS, Bits, &Result)) { + S.Stk.push<T>(Result); + return true; + } + return false; +} + +inline bool Divf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const Floating &RHS = S.Stk.pop<Floating>(); + const Floating &LHS = S.Stk.pop<Floating>(); + + if (!CheckDivRem(S, OpPC, LHS, RHS)) + return false; + + Floating Result; + auto Status = Floating::div(LHS, RHS, RM, &Result); + S.Stk.push<Floating>(Result); + return CheckFloatResult(S, OpPC, Result, Status); +} + +//===----------------------------------------------------------------------===// +// Inv +//===----------------------------------------------------------------------===// + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Inv(InterpState &S, CodePtr OpPC) { + using BoolT = PrimConv<PT_Bool>::T; + const T &Val = S.Stk.pop<T>(); + const unsigned Bits = Val.bitWidth(); + Boolean R; + Boolean::inv(BoolT::from(Val, Bits), &R); + + S.Stk.push<BoolT>(R); + return true; +} + +//===----------------------------------------------------------------------===// +// Neg +//===----------------------------------------------------------------------===// + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Neg(InterpState &S, CodePtr OpPC) { + const T &Value = S.Stk.pop<T>(); + T Result; + + if (!T::neg(Value, &Result)) { + S.Stk.push<T>(Result); + return true; + } + + assert(isIntegralType(Name) && + "don't expect other types to fail at constexpr negation"); + S.Stk.push<T>(Result); + + APSInt NegatedValue = -Value.toAPSInt(Value.bitWidth() + 1); + const Expr *E = S.Current->getExpr(OpPC); + QualType Type = E->getType(); + + if (S.checkingForUndefinedBehavior()) { + SmallString<32> Trunc; + NegatedValue.trunc(Result.bitWidth()) + .toString(Trunc, 10, Result.isSigned(), /*formatAsCLiteral=*/false, + /*UpperCase=*/true, /*InsertSeparators=*/true); + auto Loc = E->getExprLoc(); + S.report(Loc, diag::warn_integer_constant_overflow) + << Trunc << Type << E->getSourceRange(); + return true; + } + + S.CCEDiag(E, diag::note_constexpr_overflow) << NegatedValue << Type; + return S.noteUndefinedBehavior(); +} + +enum class PushVal : bool { + No, + Yes, +}; +enum class IncDecOp { + Inc, + Dec, +}; + +template <typename T, IncDecOp Op, PushVal DoPush> +bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) { + assert(!Ptr.isDummy()); + + if constexpr (std::is_same_v<T, Boolean>) { + if (!S.getLangOpts().CPlusPlus14) + return Invalid(S, OpPC); + } + + const T &Value = Ptr.deref<T>(); + T Result; + + if constexpr (DoPush == PushVal::Yes) + S.Stk.push<T>(Value); + + if constexpr (Op == IncDecOp::Inc) { + if (!T::increment(Value, &Result)) { + Ptr.deref<T>() = Result; + return true; + } + } else { + if (!T::decrement(Value, &Result)) { + Ptr.deref<T>() = Result; + return true; + } + } + + // Something went wrong with the previous operation. Compute the + // result with another bit of precision. + unsigned Bits = Value.bitWidth() + 1; + APSInt APResult; + if constexpr (Op == IncDecOp::Inc) + APResult = ++Value.toAPSInt(Bits); + else + APResult = --Value.toAPSInt(Bits); + + // Report undefined behaviour, stopping if required. 
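As an illustration (assuming 32-bit int; the helper name below is made up for the example), this is the path a constant evaluation like the following ends up on:

  constexpr int next(int I) { return ++I; }
  constexpr int K = next(2147483647); // signed overflow: not a constant expression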
+ const Expr *E = S.Current->getExpr(OpPC); + QualType Type = E->getType(); + if (S.checkingForUndefinedBehavior()) { + SmallString<32> Trunc; + APResult.trunc(Result.bitWidth()) + .toString(Trunc, 10, Result.isSigned(), /*formatAsCLiteral=*/false, + /*UpperCase=*/true, /*InsertSeparators=*/true); + auto Loc = E->getExprLoc(); + S.report(Loc, diag::warn_integer_constant_overflow) + << Trunc << Type << E->getSourceRange(); + return true; + } + + S.CCEDiag(E, diag::note_constexpr_overflow) << APResult << Type; + return S.noteUndefinedBehavior(); +} + +/// 1) Pops a pointer from the stack +/// 2) Load the value from the pointer +/// 3) Writes the value increased by one back to the pointer +/// 4) Pushes the original (pre-inc) value on the stack. +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Inc(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr); +} + +/// 1) Pops a pointer from the stack +/// 2) Load the value from the pointer +/// 3) Writes the value increased by one back to the pointer +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool IncPop(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr); +} + +/// 1) Pops a pointer from the stack +/// 2) Load the value from the pointer +/// 3) Writes the value decreased by one back to the pointer +/// 4) Pushes the original (pre-dec) value on the stack. +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Dec(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr); +} + +/// 1) Pops a pointer from the stack +/// 2) Load the value from the pointer +/// 3) Writes the value decreased by one back to the pointer +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool DecPop(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr); +} + +template <IncDecOp Op, PushVal DoPush> +bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr, + llvm::RoundingMode RM) { + Floating Value = Ptr.deref<Floating>(); + Floating Result; + + if constexpr (DoPush == PushVal::Yes) + S.Stk.push<Floating>(Value); + + llvm::APFloat::opStatus Status; + if constexpr (Op == IncDecOp::Inc) + Status = Floating::increment(Value, RM, &Result); + else + Status = Floating::decrement(Value, RM, &Result); + + Ptr.deref<Floating>() = Result; + + return CheckFloatResult(S, OpPC, Result, Status); +} + +inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecFloatHelper<IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, RM); +} + +inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecFloatHelper<IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, RM); +} + +inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const 
Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecFloatHelper<IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, RM); +} + +inline bool DecfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecFloatHelper<IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, RM); +} + +/// 1) Pops the value from the stack. +/// 2) Pushes the bitwise complemented value on the stack (~V). +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Comp(InterpState &S, CodePtr OpPC) { + const T &Val = S.Stk.pop<T>(); + T Result; + if (!T::comp(Val, &Result)) { + S.Stk.push<T>(Result); + return true; + } + + return false; +} + //===----------------------------------------------------------------------===// // EQ, NE, GT, GE, LT, LE //===----------------------------------------------------------------------===// @@ -161,6 +892,9 @@ using CompareFn = llvm::function_ref<bool(ComparisonCategoryResult)>; template <typename T> bool CmpHelper(InterpState &S, CodePtr OpPC, CompareFn Fn) { + assert((!std::is_same_v<T, MemberPointer>) && + "Non-equality comparisons on member pointer types should already be " + "rejected in Sema."); using BoolT = PrimConv<PT_Bool>::T; const T &RHS = S.Stk.pop<T>(); const T &LHS = S.Stk.pop<T>(); @@ -173,6 +907,40 @@ bool CmpHelperEQ(InterpState &S, CodePtr OpPC, CompareFn Fn) { return CmpHelper<T>(S, OpPC, Fn); } +/// Function pointers cannot be compared in an ordered way. +template <> +inline bool CmpHelper<FunctionPointer>(InterpState &S, CodePtr OpPC, + CompareFn Fn) { + const auto &RHS = S.Stk.pop<FunctionPointer>(); + const auto &LHS = S.Stk.pop<FunctionPointer>(); + + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_unspecified) + << LHS.toDiagnosticString(S.getCtx()) + << RHS.toDiagnosticString(S.getCtx()); + return false; +} + +template <> +inline bool CmpHelperEQ<FunctionPointer>(InterpState &S, CodePtr OpPC, + CompareFn Fn) { + const auto &RHS = S.Stk.pop<FunctionPointer>(); + const auto &LHS = S.Stk.pop<FunctionPointer>(); + + // We cannot compare against weak declarations at compile time. + for (const auto &FP : {LHS, RHS}) { + if (FP.isWeak()) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_pointer_weak_comparison) + << FP.toDiagnosticString(S.getCtx()); + return false; + } + } + + S.Stk.push<Boolean>(Boolean::from(Fn(LHS.compare(RHS)))); + return true; +} + template <> inline bool CmpHelper<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) { using BoolT = PrimConv<PT_Bool>::T; @@ -181,7 +949,9 @@ inline bool CmpHelper<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) { if (!Pointer::hasSameBase(LHS, RHS)) { const SourceInfo &Loc = S.Current->getSource(OpPC); - S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr); + S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_unspecified) + << LHS.toDiagnosticString(S.getCtx()) + << RHS.toDiagnosticString(S.getCtx()); return false; } else { unsigned VL = LHS.getByteOffset(); @@ -202,17 +972,94 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) { return true; } + // Reject comparisons to weak pointers. 
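An example of what gets rejected here (illustration only; 'W' is a hypothetical weak symbol):

  extern int W __attribute__((weak));
  constexpr bool B = &W == nullptr; // not constant: whether a weak symbol is null is only known at runtime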
+ for (const auto &P : {LHS, RHS}) { + if (P.isZero()) + continue; + if (P.isWeak()) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_pointer_weak_comparison) + << P.toDiagnosticString(S.getCtx()); + return false; + } + } + if (!Pointer::hasSameBase(LHS, RHS)) { + if (LHS.isOnePastEnd() && !RHS.isOnePastEnd() && !RHS.isZero() && + RHS.getOffset() == 0) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end) + << LHS.toDiagnosticString(S.getCtx()); + return false; + } else if (RHS.isOnePastEnd() && !LHS.isOnePastEnd() && !LHS.isZero() && + LHS.getOffset() == 0) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end) + << RHS.toDiagnosticString(S.getCtx()); + return false; + } + S.Stk.push<BoolT>(BoolT::from(Fn(ComparisonCategoryResult::Unordered))); return true; } else { unsigned VL = LHS.getByteOffset(); unsigned VR = RHS.getByteOffset(); + + // In our Pointer class, a pointer to an array and a pointer to the first + // element in the same array are NOT equal. They have the same Base value, + // but a different Offset. This is a pretty rare case, so we fix this here + // by comparing pointers to the first elements. + if (!LHS.isZero() && LHS.isArrayRoot()) + VL = LHS.atIndex(0).getByteOffset(); + if (!RHS.isZero() && RHS.isArrayRoot()) + VR = RHS.atIndex(0).getByteOffset(); + S.Stk.push<BoolT>(BoolT::from(Fn(Compare(VL, VR)))); return true; } } +template <> +inline bool CmpHelperEQ<MemberPointer>(InterpState &S, CodePtr OpPC, + CompareFn Fn) { + const auto &RHS = S.Stk.pop<MemberPointer>(); + const auto &LHS = S.Stk.pop<MemberPointer>(); + + // If either operand is a pointer to a weak function, the comparison is not + // constant. + for (const auto &MP : {LHS, RHS}) { + if (const CXXMethodDecl *MD = MP.getMemberFunction(); MD && MD->isWeak()) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_mem_pointer_weak_comparison) << MD; + return false; + } + } + + // C++11 [expr.eq]p2: + // If both operands are null, they compare equal. Otherwise if only one is + // null, they compare unequal. + if (LHS.isZero() && RHS.isZero()) { + S.Stk.push<Boolean>(Fn(ComparisonCategoryResult::Equal)); + return true; + } + if (LHS.isZero() || RHS.isZero()) { + S.Stk.push<Boolean>(Fn(ComparisonCategoryResult::Unordered)); + return true; + } + + // We cannot compare against virtual declarations at compile time. + for (const auto &MP : {LHS, RHS}) { + if (const CXXMethodDecl *MD = MP.getMemberFunction(); + MD && MD->isVirtual()) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.CCEDiag(Loc, diag::note_constexpr_compare_virtual_mem_ptr) << MD; + } + } + + S.Stk.push<Boolean>(Boolean::from(Fn(LHS.compare(RHS)))); + return true; +} + template <PrimType Name, class T = typename PrimConv<Name>::T> bool EQ(InterpState &S, CodePtr OpPC) { return CmpHelperEQ<T>(S, OpPC, [](ComparisonCategoryResult R) { @@ -221,6 +1068,30 @@ bool EQ(InterpState &S, CodePtr OpPC) { } template <PrimType Name, class T = typename PrimConv<Name>::T> +bool CMP3(InterpState &S, CodePtr OpPC, const ComparisonCategoryInfo *CmpInfo) { + const T &RHS = S.Stk.pop<T>(); + const T &LHS = S.Stk.pop<T>(); + const Pointer &P = S.Stk.peek<Pointer>(); + + ComparisonCategoryResult CmpResult = LHS.compare(RHS); + if (CmpResult == ComparisonCategoryResult::Unordered) { + // This should only happen with pointers. 
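Concretely (illustration only, requires <compare>), a three-way comparison of pointers to unrelated objects has no result the evaluator can produce:

  constexpr int X = 0, Y = 0;
  constexpr auto R = &X <=> &Y; // unspecified result: not a constant expression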
+ const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_unspecified) + << LHS.toDiagnosticString(S.getCtx()) + << RHS.toDiagnosticString(S.getCtx()); + return false; + } + + assert(CmpInfo); + const auto *CmpValueInfo = + CmpInfo->getValueInfo(CmpInfo->makeWeakResult(CmpResult)); + assert(CmpValueInfo); + assert(CmpValueInfo->hasValidIntValue()); + return SetThreeWayComparisonField(S, OpPC, P, CmpValueInfo->getIntValue()); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> bool NE(InterpState &S, CodePtr OpPC) { return CmpHelperEQ<T>(S, OpPC, [](ComparisonCategoryResult R) { return R != ComparisonCategoryResult::Equal; @@ -303,10 +1174,16 @@ bool Const(InterpState &S, CodePtr OpPC, const T &Arg) { template <PrimType Name, class T = typename PrimConv<Name>::T> bool GetLocal(InterpState &S, CodePtr OpPC, uint32_t I) { - S.Stk.push<T>(S.Current->getLocal<T>(I)); + const Pointer &Ptr = S.Current->getLocalPointer(I); + if (!CheckLoad(S, OpPC, Ptr)) + return false; + S.Stk.push<T>(Ptr.deref<T>()); return true; } +/// 1) Pops the value from the stack. +/// 2) Writes the value to the local variable with the +/// given offset. template <PrimType Name, class T = typename PrimConv<Name>::T> bool SetLocal(InterpState &S, CodePtr OpPC, uint32_t I) { S.Current->setLocal<T>(I, S.Stk.pop<T>()); @@ -328,6 +1205,8 @@ bool SetParam(InterpState &S, CodePtr OpPC, uint32_t I) { return true; } +/// 1) Peeks a pointer on the stack +/// 2) Pushes the value of the pointer's field on the stack template <PrimType Name, class T = typename PrimConv<Name>::T> bool GetField(InterpState &S, CodePtr OpPC, uint32_t I) { const Pointer &Obj = S.Stk.peek<Pointer>(); @@ -353,10 +1232,13 @@ bool SetField(InterpState &S, CodePtr OpPC, uint32_t I) { const Pointer &Field = Obj.atField(I); if (!CheckStore(S, OpPC, Field)) return false; + Field.initialize(); Field.deref<T>() = Value; return true; } +/// 1) Pops a pointer from the stack +/// 2) Pushes the value of the pointer's field on the stack template <PrimType Name, class T = typename PrimConv<Name>::T> bool GetFieldPop(InterpState &S, CodePtr OpPC, uint32_t I) { const Pointer &Obj = S.Stk.pop<Pointer>(); @@ -402,10 +1284,28 @@ bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) { template <PrimType Name, class T = typename PrimConv<Name>::T> bool GetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) { - auto *B = S.P.getGlobal(I); - if (B->isExtern()) + const Pointer &Ptr = S.P.getPtrGlobal(I); + if (!CheckConstant(S, OpPC, Ptr.getFieldDesc())) + return false; + if (Ptr.isExtern()) + return false; + + // If a global variable is uninitialized, that means the initializer we've + // compiled for it wasn't a constant expression. Diagnose that. + if (!CheckGlobalInitialized(S, OpPC, Ptr)) return false; - S.Stk.push<T>(B->deref<T>()); + + S.Stk.push<T>(Ptr.deref<T>()); + return true; +} + +/// Same as GetGlobal, but without the checks. 
+template <PrimType Name, class T = typename PrimConv<Name>::T> +bool GetGlobalUnchecked(InterpState &S, CodePtr OpPC, uint32_t I) { + const Pointer &Ptr = S.P.getPtrGlobal(I); + if (!Ptr.isInitialized()) + return false; + S.Stk.push<T>(Ptr.deref<T>()); return true; } @@ -417,10 +1317,56 @@ bool SetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) { template <PrimType Name, class T = typename PrimConv<Name>::T> bool InitGlobal(InterpState &S, CodePtr OpPC, uint32_t I) { - S.P.getGlobal(I)->deref<T>() = S.Stk.pop<T>(); + const Pointer &P = S.P.getGlobal(I); + P.deref<T>() = S.Stk.pop<T>(); + P.initialize(); return true; } +/// 1) Converts the value on top of the stack to an APValue +/// 2) Sets that APValue on \Temp +/// 3) Initializes global with index \I with that +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool InitGlobalTemp(InterpState &S, CodePtr OpPC, uint32_t I, + const LifetimeExtendedTemporaryDecl *Temp) { + const Pointer &Ptr = S.P.getGlobal(I); + + const T Value = S.Stk.peek<T>(); + APValue APV = Value.toAPValue(S.getCtx()); + APValue *Cached = Temp->getOrCreateValue(true); + *Cached = APV; + + assert(Ptr.getDeclDesc()->asExpr()); + + S.SeenGlobalTemporaries.push_back( + std::make_pair(Ptr.getDeclDesc()->asExpr(), Temp)); + + Ptr.deref<T>() = S.Stk.pop<T>(); + Ptr.initialize(); + return true; +} + +/// 1) Converts the value on top of the stack to an APValue +/// 2) Sets that APValue on \Temp +/// 3) Initialized global with index \I with that +inline bool InitGlobalTempComp(InterpState &S, CodePtr OpPC, + const LifetimeExtendedTemporaryDecl *Temp) { + assert(Temp); + const Pointer &P = S.Stk.peek<Pointer>(); + APValue *Cached = Temp->getOrCreateValue(true); + + S.SeenGlobalTemporaries.push_back( + std::make_pair(P.getDeclDesc()->asExpr(), Temp)); + + if (std::optional<APValue> APV = + P.toRValue(S.getCtx(), Temp->getTemporaryExpr()->getType())) { + *Cached = *APV; + return true; + } + + return false; +} + template <PrimType Name, class T = typename PrimConv<Name>::T> bool InitThisField(InterpState &S, CodePtr OpPC, uint32_t I) { if (S.checkingPotentialConstantExpression()) @@ -434,14 +1380,18 @@ bool InitThisField(InterpState &S, CodePtr OpPC, uint32_t I) { return true; } +// FIXME: The Field pointer here is too much IMO and we could instead just +// pass an Offset + BitWidth pair. 
template <PrimType Name, class T = typename PrimConv<Name>::T> -bool InitThisBitField(InterpState &S, CodePtr OpPC, const Record::Field *F) { +bool InitThisBitField(InterpState &S, CodePtr OpPC, const Record::Field *F, + uint32_t FieldOffset) { + assert(F->isBitField()); if (S.checkingPotentialConstantExpression()) return false; const Pointer &This = S.Current->getThis(); if (!CheckThis(S, OpPC, This)) return false; - const Pointer &Field = This.atField(F->Offset); + const Pointer &Field = This.atField(FieldOffset); const auto &Value = S.Stk.pop<T>(); Field.deref<T>() = Value.truncate(F->Decl->getBitWidthValue(S.getCtx())); Field.initialize(); @@ -462,10 +1412,13 @@ bool InitThisFieldActive(InterpState &S, CodePtr OpPC, uint32_t I) { return true; } +/// 1) Pops the value from the stack +/// 2) Peeks a pointer from the stack +/// 3) Pushes the value to field I of the pointer on the stack template <PrimType Name, class T = typename PrimConv<Name>::T> bool InitField(InterpState &S, CodePtr OpPC, uint32_t I) { const T &Value = S.Stk.pop<T>(); - const Pointer &Field = S.Stk.pop<Pointer>().atField(I); + const Pointer &Field = S.Stk.peek<Pointer>().atField(I); Field.deref<T>() = Value; Field.activate(); Field.initialize(); @@ -474,8 +1427,9 @@ bool InitField(InterpState &S, CodePtr OpPC, uint32_t I) { template <PrimType Name, class T = typename PrimConv<Name>::T> bool InitBitField(InterpState &S, CodePtr OpPC, const Record::Field *F) { + assert(F->isBitField()); const T &Value = S.Stk.pop<T>(); - const Pointer &Field = S.Stk.pop<Pointer>().atField(F->Offset); + const Pointer &Field = S.Stk.peek<Pointer>().atField(F->Offset); Field.deref<T>() = Value.truncate(F->Decl->getBitWidthValue(S.getCtx())); Field.activate(); Field.initialize(); @@ -515,14 +1469,49 @@ inline bool GetPtrGlobal(InterpState &S, CodePtr OpPC, uint32_t I) { return true; } +/// 1) Peeks a Pointer +/// 2) Pushes Pointer.atField(Off) on the stack inline bool GetPtrField(InterpState &S, CodePtr OpPC, uint32_t Off) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + + if (S.getLangOpts().CPlusPlus && S.inConstantContext() && + !CheckNull(S, OpPC, Ptr, CSK_Field)) + return false; + + if (!CheckExtern(S, OpPC, Ptr)) + return false; + if (!CheckRange(S, OpPC, Ptr, CSK_Field)) + return false; + if (!CheckArray(S, OpPC, Ptr)) + return false; + if (!CheckSubobject(S, OpPC, Ptr, CSK_Field)) + return false; + + if (Ptr.isBlockPointer() && Off > Ptr.block()->getSize()) + return false; + S.Stk.push<Pointer>(Ptr.atField(Off)); + return true; +} + +inline bool GetPtrFieldPop(InterpState &S, CodePtr OpPC, uint32_t Off) { const Pointer &Ptr = S.Stk.pop<Pointer>(); - if (!CheckNull(S, OpPC, Ptr, CSK_Field)) + + if (S.getLangOpts().CPlusPlus && S.inConstantContext() && + !CheckNull(S, OpPC, Ptr, CSK_Field)) return false; + if (!CheckExtern(S, OpPC, Ptr)) return false; if (!CheckRange(S, OpPC, Ptr, CSK_Field)) return false; + if (!CheckArray(S, OpPC, Ptr)) + return false; + if (!CheckSubobject(S, OpPC, Ptr, CSK_Field)) + return false; + + if (Ptr.isBlockPointer() && Off > Ptr.block()->getSize()) + return false; + S.Stk.push<Pointer>(Ptr.atField(Off)); return true; } @@ -563,14 +1552,45 @@ inline bool GetPtrActiveThisField(InterpState &S, CodePtr OpPC, uint32_t Off) { return true; } +inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckNull(S, OpPC, Ptr, CSK_Derived)) + return false; + if (!CheckSubobject(S, OpPC, Ptr, CSK_Derived)) + return false; + if (!CheckDowncast(S, 
OpPC, Ptr, Off)) + return false; + + S.Stk.push<Pointer>(Ptr.atFieldSub(Off)); + return true; +} + inline bool GetPtrBase(InterpState &S, CodePtr OpPC, uint32_t Off) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (!CheckNull(S, OpPC, Ptr, CSK_Base)) + return false; + if (!CheckSubobject(S, OpPC, Ptr, CSK_Base)) + return false; + S.Stk.push<Pointer>(Ptr.atField(Off)); + return true; +} + +inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off) { const Pointer &Ptr = S.Stk.pop<Pointer>(); if (!CheckNull(S, OpPC, Ptr, CSK_Base)) return false; + if (!CheckSubobject(S, OpPC, Ptr, CSK_Base)) + return false; S.Stk.push<Pointer>(Ptr.atField(Off)); return true; } +inline bool GetMemberPtrBasePop(InterpState &S, CodePtr OpPC, int32_t Off) { + const auto &Ptr = S.Stk.pop<MemberPointer>(); + S.Stk.push<MemberPointer>(Ptr.atInstanceBase(Off)); + return true; +} + inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) { if (S.checkingPotentialConstantExpression()) return false; @@ -581,18 +1601,43 @@ inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) { return true; } +inline bool FinishInitPop(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (Ptr.canBeInitialized()) { + Ptr.initialize(); + Ptr.activate(); + } + return true; +} + +inline bool FinishInit(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (Ptr.canBeInitialized()) { + Ptr.initialize(); + Ptr.activate(); + } + return true; +} + +inline bool Dump(InterpState &S, CodePtr OpPC) { + S.Stk.dump(); + return true; +} + inline bool VirtBaseHelper(InterpState &S, CodePtr OpPC, const RecordDecl *Decl, const Pointer &Ptr) { Pointer Base = Ptr; while (Base.isBaseClass()) Base = Base.getBase(); - auto *Field = Base.getRecord()->getVirtualBase(Decl); - S.Stk.push<Pointer>(Base.atField(Field->Offset)); + const Record::Base *VirtBase = Base.getRecord()->getVirtualBase(Decl); + S.Stk.push<Pointer>(Base.atField(VirtBase->Offset)); return true; } -inline bool GetPtrVirtBase(InterpState &S, CodePtr OpPC, const RecordDecl *D) { +inline bool GetPtrVirtBasePop(InterpState &S, CodePtr OpPC, + const RecordDecl *D) { + assert(D); const Pointer &Ptr = S.Stk.pop<Pointer>(); if (!CheckNull(S, OpPC, Ptr, CSK_Base)) return false; @@ -601,6 +1646,7 @@ inline bool GetPtrVirtBase(InterpState &S, CodePtr OpPC, const RecordDecl *D) { inline bool GetPtrThisVirtBase(InterpState &S, CodePtr OpPC, const RecordDecl *D) { + assert(D); if (S.checkingPotentialConstantExpression()) return false; const Pointer &This = S.Current->getThis(); @@ -618,6 +1664,8 @@ bool Load(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.peek<Pointer>(); if (!CheckLoad(S, OpPC, Ptr)) return false; + if (!Ptr.isBlockPointer()) + return false; S.Stk.push<T>(Ptr.deref<T>()); return true; } @@ -627,6 +1675,8 @@ bool LoadPop(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.pop<Pointer>(); if (!CheckLoad(S, OpPC, Ptr)) return false; + if (!Ptr.isBlockPointer()) + return false; S.Stk.push<T>(Ptr.deref<T>()); return true; } @@ -637,6 +1687,8 @@ bool Store(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.peek<Pointer>(); if (!CheckStore(S, OpPC, Ptr)) return false; + if (Ptr.canBeInitialized()) + Ptr.initialize(); Ptr.deref<T>() = Value; return true; } @@ -647,6 +1699,8 @@ bool StorePop(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.pop<Pointer>(); if (!CheckStore(S, OpPC, Ptr)) return false; + if (Ptr.canBeInitialized()) + Ptr.initialize(); 
Ptr.deref<T>() = Value; return true; } @@ -657,11 +1711,12 @@ bool StoreBitField(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.peek<Pointer>(); if (!CheckStore(S, OpPC, Ptr)) return false; - if (auto *FD = Ptr.getField()) { + if (Ptr.canBeInitialized()) + Ptr.initialize(); + if (const auto *FD = Ptr.getField()) Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue(S.getCtx())); - } else { + else Ptr.deref<T>() = Value; - } return true; } @@ -671,11 +1726,25 @@ bool StoreBitFieldPop(InterpState &S, CodePtr OpPC) { const Pointer &Ptr = S.Stk.pop<Pointer>(); if (!CheckStore(S, OpPC, Ptr)) return false; - if (auto *FD = Ptr.getField()) { + if (Ptr.canBeInitialized()) + Ptr.initialize(); + if (const auto *FD = Ptr.getField()) Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue(S.getCtx())); - } else { + else Ptr.deref<T>() = Value; + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool Init(InterpState &S, CodePtr OpPC) { + const T &Value = S.Stk.pop<T>(); + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (!CheckInit(S, OpPC, Ptr)) { + assert(false); + return false; } + Ptr.initialize(); + new (&Ptr.deref<T>()) T(Value); return true; } @@ -690,10 +1759,15 @@ bool InitPop(InterpState &S, CodePtr OpPC) { return true; } +/// 1) Pops the value from the stack +/// 2) Peeks a pointer and gets its index \Idx +/// 3) Sets the value on the pointer, leaving the pointer on the stack. template <PrimType Name, class T = typename PrimConv<Name>::T> bool InitElem(InterpState &S, CodePtr OpPC, uint32_t Idx) { const T &Value = S.Stk.pop<T>(); const Pointer &Ptr = S.Stk.peek<Pointer>().atIndex(Idx); + if (Ptr.isUnknownSizeArray()) + return false; if (!CheckInit(S, OpPC, Ptr)) return false; Ptr.initialize(); @@ -701,10 +1775,13 @@ bool InitElem(InterpState &S, CodePtr OpPC, uint32_t Idx) { return true; } +/// The same as InitElem, but pops the pointer as well. template <PrimType Name, class T = typename PrimConv<Name>::T> bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) { const T &Value = S.Stk.pop<T>(); const Pointer &Ptr = S.Stk.pop<Pointer>().atIndex(Idx); + if (Ptr.isUnknownSizeArray()) + return false; if (!CheckInit(S, OpPC, Ptr)) return false; Ptr.initialize(); @@ -712,78 +1789,213 @@ bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) { return true; } +inline bool Memcpy(InterpState &S, CodePtr OpPC) { + const Pointer &Src = S.Stk.pop<Pointer>(); + Pointer &Dest = S.Stk.peek<Pointer>(); + + if (!CheckLoad(S, OpPC, Src)) + return false; + + return DoMemcpy(S, OpPC, Src, Dest); +} + +inline bool ToMemberPtr(InterpState &S, CodePtr OpPC) { + const auto &Member = S.Stk.pop<MemberPointer>(); + const auto &Base = S.Stk.pop<Pointer>(); + + S.Stk.push<MemberPointer>(Member.takeInstance(Base)); + return true; +} + +inline bool CastMemberPtrPtr(InterpState &S, CodePtr OpPC) { + const auto &MP = S.Stk.pop<MemberPointer>(); + + if (std::optional<Pointer> Ptr = MP.toPointer(S.Ctx)) { + S.Stk.push<Pointer>(*Ptr); + return true; + } + return false; +} + //===----------------------------------------------------------------------===// // AddOffset, SubOffset //===----------------------------------------------------------------------===// -template <class T, bool Add> bool OffsetHelper(InterpState &S, CodePtr OpPC) { - // Fetch the pointer and the offset. 
- const T &Offset = S.Stk.pop<T>(); - const Pointer &Ptr = S.Stk.pop<Pointer>(); - if (!CheckNull(S, OpPC, Ptr, CSK_ArrayIndex)) - return false; - if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer)) - return false; - - // Get a version of the index comparable to the type. - T Index = T::from(Ptr.getIndex(), Offset.bitWidth()); - // A zero offset does not change the pointer, but in the case of an array - // it has to be adjusted to point to the first element instead of the array. +template <class T, ArithOp Op> +bool OffsetHelper(InterpState &S, CodePtr OpPC, const T &Offset, + const Pointer &Ptr) { + // A zero offset does not change the pointer. if (Offset.isZero()) { - S.Stk.push<Pointer>(Index.isZero() ? Ptr.atIndex(0) : Ptr); + S.Stk.push<Pointer>(Ptr); return true; } + + if (!CheckNull(S, OpPC, Ptr, CSK_ArrayIndex)) { + // The CheckNull will have emitted a note already, but we only + // abort in C++, since this is fine in C. + if (S.getLangOpts().CPlusPlus) + return false; + } + // Arrays of unknown bounds cannot have pointers into them. if (!CheckArray(S, OpPC, Ptr)) return false; - // Compute the largest index into the array. - unsigned MaxIndex = Ptr.getNumElems(); + uint64_t MaxIndex = static_cast<uint64_t>(Ptr.getNumElems()); + uint64_t Index; + if (Ptr.isOnePastEnd()) + Index = MaxIndex; + else + Index = Ptr.getIndex(); + bool Invalid = false; // Helper to report an invalid offset, computed as APSInt. - auto InvalidOffset = [&]() { + auto DiagInvalidOffset = [&]() -> void { const unsigned Bits = Offset.bitWidth(); - APSInt APOffset(Offset.toAPSInt().extend(Bits + 2), false); - APSInt APIndex(Index.toAPSInt().extend(Bits + 2), false); - APSInt NewIndex = Add ? (APIndex + APOffset) : (APIndex - APOffset); + APSInt APOffset(Offset.toAPSInt().extend(Bits + 2), /*IsUnsigend=*/false); + APSInt APIndex(APInt(Bits + 2, Index, /*IsSigned=*/true), + /*IsUnsigned=*/false); + APSInt NewIndex = + (Op == ArithOp::Add) ? (APIndex + APOffset) : (APIndex - APOffset); S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_array_index) - << NewIndex - << /*array*/ static_cast<int>(!Ptr.inArray()) - << static_cast<unsigned>(MaxIndex); - return false; + << NewIndex << /*array*/ static_cast<int>(!Ptr.inArray()) << MaxIndex; + Invalid = true; }; - // If the new offset would be negative, bail out. - if (Add && Offset.isNegative() && (Offset.isMin() || -Offset > Index)) - return InvalidOffset(); - if (!Add && Offset.isPositive() && Index < Offset) - return InvalidOffset(); + if (Ptr.isBlockPointer()) { + uint64_t IOffset = static_cast<uint64_t>(Offset); + uint64_t MaxOffset = MaxIndex - Index; + + if constexpr (Op == ArithOp::Add) { + // If the new offset would be negative, bail out. + if (Offset.isNegative() && (Offset.isMin() || -IOffset > Index)) + DiagInvalidOffset(); + + // If the new offset would be out of bounds, bail out. + if (Offset.isPositive() && IOffset > MaxOffset) + DiagInvalidOffset(); + } else { + // If the new offset would be negative, bail out. + if (Offset.isPositive() && Index < IOffset) + DiagInvalidOffset(); + + // If the new offset would be out of bounds, bail out. + if (Offset.isNegative() && (Offset.isMin() || -IOffset > MaxOffset)) + DiagInvalidOffset(); + } + } - // If the new offset would be out of bounds, bail out. 
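Both the old and the new bounds handling correspond to rejecting constant expressions like this one (illustration only):

  constexpr int Arr[3] = {1, 2, 3};
  constexpr const int *P = Arr + 5; // cannot refer to element 5 of an array of 3 elements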
- unsigned MaxOffset = MaxIndex - Ptr.getIndex(); - if (Add && Offset.isPositive() && Offset > MaxOffset) - return InvalidOffset(); - if (!Add && Offset.isNegative() && (Offset.isMin() || -Offset > MaxOffset)) - return InvalidOffset(); + if (Invalid && S.getLangOpts().CPlusPlus) + return false; // Offset is valid - compute it on unsigned. int64_t WideIndex = static_cast<int64_t>(Index); int64_t WideOffset = static_cast<int64_t>(Offset); - int64_t Result = Add ? (WideIndex + WideOffset) : (WideIndex - WideOffset); - S.Stk.push<Pointer>(Ptr.atIndex(static_cast<unsigned>(Result))); + int64_t Result; + if constexpr (Op == ArithOp::Add) + Result = WideIndex + WideOffset; + else + Result = WideIndex - WideOffset; + + // When the pointer is one-past-end, going back to index 0 is the only + // useful thing we can do. Any other index has been diagnosed before and + // we don't get here. + if (Result == 0 && Ptr.isOnePastEnd()) { + S.Stk.push<Pointer>(Ptr.asBlockPointer().Pointee, + Ptr.asBlockPointer().Base); + return true; + } + + S.Stk.push<Pointer>(Ptr.atIndex(static_cast<uint64_t>(Result))); return true; } template <PrimType Name, class T = typename PrimConv<Name>::T> bool AddOffset(InterpState &S, CodePtr OpPC) { - return OffsetHelper<T, true>(S, OpPC); + const T &Offset = S.Stk.pop<T>(); + const Pointer &Ptr = S.Stk.pop<Pointer>(); + return OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr); } template <PrimType Name, class T = typename PrimConv<Name>::T> bool SubOffset(InterpState &S, CodePtr OpPC) { - return OffsetHelper<T, false>(S, OpPC); + const T &Offset = S.Stk.pop<T>(); + const Pointer &Ptr = S.Stk.pop<Pointer>(); + return OffsetHelper<T, ArithOp::Sub>(S, OpPC, Offset, Ptr); +} + +template <ArithOp Op> +static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC, + const Pointer &Ptr) { + if (Ptr.isDummy()) + return false; + + using OneT = Integral<8, false>; + + const Pointer &P = Ptr.deref<Pointer>(); + if (!CheckNull(S, OpPC, P, CSK_ArrayIndex)) + return false; + + // Get the current value on the stack. + S.Stk.push<Pointer>(P); + + // Now the current Ptr again and a constant 1. + OneT One = OneT::from(1); + if (!OffsetHelper<OneT, Op>(S, OpPC, One, P)) + return false; + + // Store the new value. + Ptr.deref<Pointer>() = S.Stk.pop<Pointer>(); + return true; } +static inline bool IncPtr(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (!CheckInitialized(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecPtrHelper<ArithOp::Add>(S, OpPC, Ptr); +} + +static inline bool DecPtr(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecPtrHelper<ArithOp::Sub>(S, OpPC, Ptr); +} + +/// 1) Pops a Pointer from the stack. +/// 2) Pops another Pointer from the stack. +/// 3) Pushes the different of the indices of the two pointers on the stack. +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool SubPtr(InterpState &S, CodePtr OpPC) { + const Pointer &LHS = S.Stk.pop<Pointer>(); + const Pointer &RHS = S.Stk.pop<Pointer>(); + + if (RHS.isZero()) { + S.Stk.push<T>(T::from(LHS.getIndex())); + return true; + } + + if (!Pointer::hasSameBase(LHS, RHS) && S.getLangOpts().CPlusPlus) { + // TODO: Diagnose. + return false; + } + + if (LHS.isZero() && RHS.isZero()) { + S.Stk.push<T>(); + return true; + } + + T A = LHS.isElementPastEnd() ? T::from(LHS.getNumElems()) + : T::from(LHS.getIndex()); + T B = RHS.isElementPastEnd() ? 
T::from(RHS.getNumElems()) + : T::from(RHS.getIndex()); + return AddSubMulHelper<T, T::sub, std::minus>(S, OpPC, A.bitWidth(), A, B); +} //===----------------------------------------------------------------------===// // Destroy @@ -805,6 +2017,185 @@ template <PrimType TIn, PrimType TOut> bool Cast(InterpState &S, CodePtr OpPC) { return true; } +/// 1) Pops a Floating from the stack. +/// 2) Pushes a new floating on the stack that uses the given semantics. +inline bool CastFP(InterpState &S, CodePtr OpPC, const llvm::fltSemantics *Sem, + llvm::RoundingMode RM) { + Floating F = S.Stk.pop<Floating>(); + Floating Result = F.toSemantics(Sem, RM); + S.Stk.push<Floating>(Result); + return true; +} + +/// Like Cast(), but we cast to an arbitrary-bitwidth integral, so we need +/// to know what bitwidth the result should be. +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool CastAP(InterpState &S, CodePtr OpPC, uint32_t BitWidth) { + S.Stk.push<IntegralAP<false>>( + IntegralAP<false>::from(S.Stk.pop<T>(), BitWidth)); + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool CastAPS(InterpState &S, CodePtr OpPC, uint32_t BitWidth) { + S.Stk.push<IntegralAP<true>>( + IntegralAP<true>::from(S.Stk.pop<T>(), BitWidth)); + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool CastIntegralFloating(InterpState &S, CodePtr OpPC, + const llvm::fltSemantics *Sem, + llvm::RoundingMode RM) { + const T &From = S.Stk.pop<T>(); + APSInt FromAP = From.toAPSInt(); + Floating Result; + + auto Status = Floating::fromIntegral(FromAP, *Sem, RM, Result); + S.Stk.push<Floating>(Result); + + return CheckFloatResult(S, OpPC, Result, Status); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool CastFloatingIntegral(InterpState &S, CodePtr OpPC) { + const Floating &F = S.Stk.pop<Floating>(); + + if constexpr (std::is_same_v<T, Boolean>) { + S.Stk.push<T>(T(F.isNonZero())); + return true; + } else { + APSInt Result(std::max(8u, T::bitWidth()), + /*IsUnsigned=*/!T::isSigned()); + auto Status = F.convertToInteger(Result); + + // Float-to-Integral overflow check. + if ((Status & APFloat::opStatus::opInvalidOp)) { + const Expr *E = S.Current->getExpr(OpPC); + QualType Type = E->getType(); + + S.CCEDiag(E, diag::note_constexpr_overflow) << F.getAPFloat() << Type; + if (S.noteUndefinedBehavior()) { + S.Stk.push<T>(T(Result)); + return true; + } + return false; + } + + S.Stk.push<T>(T(Result)); + return CheckFloatResult(S, OpPC, F, Status); + } +} + +static inline bool CastFloatingIntegralAP(InterpState &S, CodePtr OpPC, + uint32_t BitWidth) { + const Floating &F = S.Stk.pop<Floating>(); + + APSInt Result(BitWidth, /*IsUnsigned=*/true); + auto Status = F.convertToInteger(Result); + + // Float-to-Integral overflow check. + if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) { + const Expr *E = S.Current->getExpr(OpPC); + QualType Type = E->getType(); + + S.CCEDiag(E, diag::note_constexpr_overflow) << F.getAPFloat() << Type; + return S.noteUndefinedBehavior(); + } + + S.Stk.push<IntegralAP<true>>(IntegralAP<true>(Result)); + return CheckFloatResult(S, OpPC, F, Status); +} + +static inline bool CastFloatingIntegralAPS(InterpState &S, CodePtr OpPC, + uint32_t BitWidth) { + const Floating &F = S.Stk.pop<Floating>(); + + APSInt Result(BitWidth, /*IsUnsigned=*/false); + auto Status = F.convertToInteger(Result); + + // Float-to-Integral overflow check. 
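For example (illustration only), the overflow check below rejects conversions whose truncated value does not fit the target integer type:

  constexpr int I = static_cast<int>(1.0e40); // out of range of 'int': not a constant expression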
+ if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) { + const Expr *E = S.Current->getExpr(OpPC); + QualType Type = E->getType(); + + S.CCEDiag(E, diag::note_constexpr_overflow) << F.getAPFloat() << Type; + return S.noteUndefinedBehavior(); + } + + S.Stk.push<IntegralAP<true>>(IntegralAP<true>(Result)); + return CheckFloatResult(S, OpPC, F, Status); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool CastPointerIntegral(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (Ptr.isDummy()) + return false; + + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); + + S.Stk.push<T>(T::from(Ptr.getIntegerRepresentation())); + return true; +} + +static inline bool CastPointerIntegralAP(InterpState &S, CodePtr OpPC, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (Ptr.isDummy()) + return false; + + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); + + S.Stk.push<IntegralAP<false>>( + IntegralAP<false>::from(Ptr.getIntegerRepresentation(), BitWidth)); + return true; +} + +static inline bool CastPointerIntegralAPS(InterpState &S, CodePtr OpPC, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (Ptr.isDummy()) + return false; + + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); + + S.Stk.push<IntegralAP<true>>( + IntegralAP<true>::from(Ptr.getIntegerRepresentation(), BitWidth)); + return true; +} + +static inline bool PtrPtrCast(InterpState &S, CodePtr OpPC, bool SrcIsVoidPtr) { + const auto &Ptr = S.Stk.peek<Pointer>(); + + if (SrcIsVoidPtr && S.getLangOpts().CPlusPlus) { + bool HasValidResult = !Ptr.isZero(); + + if (HasValidResult) { + // FIXME: note_constexpr_invalid_void_star_cast + } else if (!S.getLangOpts().CPlusPlus26) { + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_invalid_cast) + << 3 << "'void *'" << S.Current->getRange(OpPC); + } + } else { + const SourceInfo &E = S.Current->getSource(OpPC); + S.CCEDiag(E, diag::note_constexpr_invalid_cast) + << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC); + } + + return true; +} + //===----------------------------------------------------------------------===// // Zero, Nullptr //===----------------------------------------------------------------------===// @@ -815,9 +2206,20 @@ bool Zero(InterpState &S, CodePtr OpPC) { return true; } +static inline bool ZeroIntAP(InterpState &S, CodePtr OpPC, uint32_t BitWidth) { + S.Stk.push<IntegralAP<false>>(IntegralAP<false>::zero(BitWidth)); + return true; +} + +static inline bool ZeroIntAPS(InterpState &S, CodePtr OpPC, uint32_t BitWidth) { + S.Stk.push<IntegralAP<true>>(IntegralAP<true>::zero(BitWidth)); + return true; +} + template <PrimType Name, class T = typename PrimConv<Name>::T> -inline bool Null(InterpState &S, CodePtr OpPC) { - S.Stk.push<T>(); +inline bool Null(InterpState &S, CodePtr OpPC, const Descriptor *Desc) { + // Note: Desc can be null. + S.Stk.push<T>(0, Desc); return true; } @@ -835,92 +2237,112 @@ inline bool This(InterpState &S, CodePtr OpPC) { if (!CheckThis(S, OpPC, This)) return false; + // Ensure the This pointer has been cast to the correct base. 
+ if (!This.isDummy()) { + assert(isa<CXXMethodDecl>(S.Current->getFunction()->getDecl())); + assert(This.getRecord()); + assert( + This.getRecord()->getDecl() == + cast<CXXMethodDecl>(S.Current->getFunction()->getDecl())->getParent()); + } + S.Stk.push<Pointer>(This); return true; } +inline bool RVOPtr(InterpState &S, CodePtr OpPC) { + assert(S.Current->getFunction()->hasRVO()); + if (S.checkingPotentialConstantExpression()) + return false; + S.Stk.push<Pointer>(S.Current->getRVOPtr()); + return true; +} + //===----------------------------------------------------------------------===// // Shr, Shl //===----------------------------------------------------------------------===// +enum class ShiftDir { Left, Right }; -template <PrimType TR, PrimType TL, class T = typename PrimConv<TR>::T> -unsigned Trunc(InterpState &S, CodePtr OpPC, unsigned Bits, const T &V) { - // C++11 [expr.shift]p1: Shift width must be less than the bit width of - // the shifted type. - if (Bits > 1 && V >= T::from(Bits, V.bitWidth())) { - const Expr *E = S.Current->getExpr(OpPC); - const APSInt Val = V.toAPSInt(); - QualType Ty = E->getType(); - S.CCEDiag(E, diag::note_constexpr_large_shift) << Val << Ty << Bits; - return Bits; - } else { - return static_cast<unsigned>(V); - } -} +template <class LT, class RT, ShiftDir Dir> +inline bool DoShift(InterpState &S, CodePtr OpPC, LT &LHS, RT &RHS) { + const unsigned Bits = LHS.bitWidth(); -template <PrimType TL, PrimType TR, typename T = typename PrimConv<TL>::T> -inline bool ShiftRight(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) { - if (RHS >= V.bitWidth()) { - S.Stk.push<T>(T::from(0, V.bitWidth())); - } else { - S.Stk.push<T>(T::from(V >> RHS, V.bitWidth())); + // OpenCL 6.3j: shift values are effectively % word size of LHS. + if (S.getLangOpts().OpenCL) + RT::bitAnd(RHS, RT::from(LHS.bitWidth() - 1, RHS.bitWidth()), + RHS.bitWidth(), &RHS); + + if (RHS.isNegative()) { + // During constant-folding, a negative shift is an opposite shift. Such a + // shift is not a constant expression. + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt(); + if (!S.noteUndefinedBehavior()) + return false; + RHS = -RHS; + return DoShift < LT, RT, + Dir == ShiftDir::Left ? ShiftDir::Right + : ShiftDir::Left > (S, OpPC, LHS, RHS); } - return true; -} -template <PrimType TL, PrimType TR, typename T = typename PrimConv<TL>::T> -inline bool ShiftLeft(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) { - if (V.isSigned() && !S.getLangOpts().CPlusPlus20) { - // C++11 [expr.shift]p2: A signed left shift must have a non-negative - // operand, and must not overflow the corresponding unsigned type. - // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to - // E1 x 2^E2 module 2^N. - if (V.isNegative()) { - const Expr *E = S.Current->getExpr(OpPC); - S.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << V.toAPSInt(); - } else if (V.countLeadingZeros() < RHS) { - S.CCEDiag(S.Current->getExpr(OpPC), diag::note_constexpr_lshift_discards); + if constexpr (Dir == ShiftDir::Left) { + if (LHS.isNegative() && !S.getLangOpts().CPlusPlus20) { + // C++11 [expr.shift]p2: A signed left shift must have a non-negative + // operand, and must not overflow the corresponding unsigned type. + // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to + // E1 x 2^E2 module 2^N. 
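In other words (illustration only): before C++20 a negative left-shift operand is diagnosed here, while C++20 defines the result modulo 2^N:

  constexpr int V = -1 << 1; // C++11..17: diagnosed; C++20: well-defined, V == -2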
+ const SourceInfo &Loc = S.Current->getSource(OpPC); + S.CCEDiag(Loc, diag::note_constexpr_lshift_of_negative) << LHS.toAPSInt(); + if (!S.noteUndefinedBehavior()) + return false; } } - if (V.bitWidth() == 1) { - S.Stk.push<T>(V); - } else if (RHS >= V.bitWidth()) { - S.Stk.push<T>(T::from(0, V.bitWidth())); + if (!CheckShift(S, OpPC, LHS, RHS, Bits)) + return false; + + // Limit the shift amount to Bits - 1. If this happened, + // it has already been diagnosed by CheckShift() above, + // but we still need to handle it. + typename LT::AsUnsigned R; + if constexpr (Dir == ShiftDir::Left) { + if (RHS > RT::from(Bits - 1, RHS.bitWidth())) + LT::AsUnsigned::shiftLeft(LT::AsUnsigned::from(LHS), + LT::AsUnsigned::from(Bits - 1), Bits, &R); + else + LT::AsUnsigned::shiftLeft(LT::AsUnsigned::from(LHS), + LT::AsUnsigned::from(RHS, Bits), Bits, &R); } else { - S.Stk.push<T>(T::from(V.toUnsigned() << RHS, V.bitWidth())); + if (RHS > RT::from(Bits - 1, RHS.bitWidth())) + LT::AsUnsigned::shiftRight(LT::AsUnsigned::from(LHS), + LT::AsUnsigned::from(Bits - 1), Bits, &R); + else + LT::AsUnsigned::shiftRight(LT::AsUnsigned::from(LHS), + LT::AsUnsigned::from(RHS, Bits), Bits, &R); } + + S.Stk.push<LT>(LT::from(R)); return true; } -template <PrimType TL, PrimType TR> +template <PrimType NameL, PrimType NameR> inline bool Shr(InterpState &S, CodePtr OpPC) { - const auto &RHS = S.Stk.pop<typename PrimConv<TR>::T>(); - const auto &LHS = S.Stk.pop<typename PrimConv<TL>::T>(); - const unsigned Bits = LHS.bitWidth(); + using LT = typename PrimConv<NameL>::T; + using RT = typename PrimConv<NameR>::T; + auto RHS = S.Stk.pop<RT>(); + auto LHS = S.Stk.pop<LT>(); - if (RHS.isSigned() && RHS.isNegative()) { - const SourceInfo &Loc = S.Current->getSource(OpPC); - S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt(); - return ShiftLeft<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, -RHS)); - } else { - return ShiftRight<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, RHS)); - } + return DoShift<LT, RT, ShiftDir::Right>(S, OpPC, LHS, RHS); } -template <PrimType TL, PrimType TR> +template <PrimType NameL, PrimType NameR> inline bool Shl(InterpState &S, CodePtr OpPC) { - const auto &RHS = S.Stk.pop<typename PrimConv<TR>::T>(); - const auto &LHS = S.Stk.pop<typename PrimConv<TL>::T>(); - const unsigned Bits = LHS.bitWidth(); + using LT = typename PrimConv<NameL>::T; + using RT = typename PrimConv<NameR>::T; + auto RHS = S.Stk.pop<RT>(); + auto LHS = S.Stk.pop<LT>(); - if (RHS.isSigned() && RHS.isNegative()) { - const SourceInfo &Loc = S.Current->getSource(OpPC); - S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt(); - return ShiftRight<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, -RHS)); - } else { - return ShiftLeft<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, RHS)); - } + return DoShift<LT, RT, ShiftDir::Left>(S, OpPC, LHS, RHS); } //===----------------------------------------------------------------------===// @@ -949,8 +2371,626 @@ inline bool ExpandPtr(InterpState &S, CodePtr OpPC) { return true; } -/// Interpreter entry point. -bool Interpret(InterpState &S, APValue &Result); +// 1) Pops an integral value from the stack +// 2) Peeks a pointer +// 3) Pushes a new pointer that's a narrowed array +// element of the peeked pointer with the value +// from 1) added as offset. +// +// This leaves the original pointer on the stack and pushes a new one +// with the offset applied and narrowed. 
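The shift handling above can be summarized in a small stand-alone sketch: OpenCL reduces the amount modulo the word size, a negative amount is folded as the opposite shift after a diagnostic, and an over-wide amount is clamped to Bits - 1 once CheckShift has complained. The helper below (doShift, a toy for a 32-bit left-hand side, not the templated DoShift) mirrors only that arithmetic:

    #include <cstdint>
    #include <iostream>

    static std::uint32_t doShift(std::uint32_t LHS, std::int64_t RHS,
                                 bool Left, bool OpenCL) {
      const unsigned Bits = 32;
      if (OpenCL)
        RHS &= Bits - 1;                 // OpenCL 6.3j: amount mod word size
      if (RHS < 0)                       // negative shift folds as the
        return doShift(LHS, -RHS, !Left, OpenCL); // opposite direction
      std::uint64_t Amount =
          RHS > Bits - 1 ? Bits - 1 : std::uint64_t(RHS); // clamp oversize
      return Left ? LHS << Amount : LHS >> Amount;
    }

    int main() {
      std::cout << doShift(1, 35, true, true) << '\n';   // 35 % 32 == 3 -> 8
      std::cout << doShift(16, -2, true, false) << '\n'; // 16 >> 2 == 4
      std::cout << doShift(1, 40, true, false) << '\n';  // clamped to 31
    }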
+template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) { + const T &Offset = S.Stk.pop<T>(); + const Pointer &Ptr = S.Stk.peek<Pointer>(); + + if (!Ptr.isZero()) { + if (!CheckArray(S, OpPC, Ptr)) + return false; + } + + if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr)) + return false; + + return NarrowPtr(S, OpPC); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) { + const T &Offset = S.Stk.pop<T>(); + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (!Ptr.isZero()) { + if (!CheckArray(S, OpPC, Ptr)) + return false; + } + + if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr)) + return false; + + return NarrowPtr(S, OpPC); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool ArrayElem(InterpState &S, CodePtr OpPC, uint32_t Index) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + + if (!CheckLoad(S, OpPC, Ptr)) + return false; + + S.Stk.push<T>(Ptr.atIndex(Index).deref<T>()); + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool ArrayElemPop(InterpState &S, CodePtr OpPC, uint32_t Index) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (!CheckLoad(S, OpPC, Ptr)) + return false; + + S.Stk.push<T>(Ptr.atIndex(Index).deref<T>()); + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool CopyArray(InterpState &S, CodePtr OpPC, uint32_t SrcIndex, uint32_t DestIndex, uint32_t Size) { + const auto &SrcPtr = S.Stk.pop<Pointer>(); + const auto &DestPtr = S.Stk.peek<Pointer>(); + + for (uint32_t I = 0; I != Size; ++I) { + const Pointer &SP = SrcPtr.atIndex(SrcIndex + I); + + if (!CheckLoad(S, OpPC, SP)) + return false; + + const Pointer &DP = DestPtr.atIndex(DestIndex + I); + DP.deref<T>() = SP.deref<T>(); + DP.initialize(); + } + return true; +} + +/// Just takes a pointer and checks if it's an incomplete +/// array type. +inline bool ArrayDecay(InterpState &S, CodePtr OpPC) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (Ptr.isZero()) { + S.Stk.push<Pointer>(Ptr); + return true; + } + + if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer)) + return false; + + if (Ptr.isRoot() || !Ptr.isUnknownSizeArray() || Ptr.isDummy()) { + S.Stk.push<Pointer>(Ptr.atIndex(0)); + return true; + } + + const SourceInfo &E = S.Current->getSource(OpPC); + S.FFDiag(E, diag::note_constexpr_unsupported_unsized_array); + + return false; +} + +inline bool CallVar(InterpState &S, CodePtr OpPC, const Function *Func, + uint32_t VarArgSize) { + if (Func->hasThisPointer()) { + size_t ArgSize = Func->getArgSize() + VarArgSize; + size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0); + const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset); + + // If the current function is a lambda static invoker and + // the function we're about to call is a lambda call operator, + // skip the CheckInvoke, since the ThisPtr is a null pointer + // anyway. 
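CopyArray above is an element-by-element load/store in which every written element is also flagged as initialized. A plain-C++ sketch of that loop (copyArray and the bool array standing in for the interp Pointer's initialization state are invented for the sketch):

    #include <array>
    #include <cstddef>
    #include <iostream>

    template <typename T, std::size_t N>
    void copyArray(const std::array<T, N> &Src, unsigned SrcIndex,
                   std::array<T, N> &Dest, unsigned DestIndex,
                   std::array<bool, N> &DestInit, unsigned Size) {
      for (unsigned I = 0; I != Size; ++I) {
        Dest[DestIndex + I] = Src[SrcIndex + I]; // DP.deref<T>() = SP.deref<T>()
        DestInit[DestIndex + I] = true;          // DP.initialize()
      }
    }

    int main() {
      std::array<int, 4> Src{1, 2, 3, 4}, Dest{};
      std::array<bool, 4> Init{};
      copyArray(Src, 0, Dest, 0, Init, 4);
      std::cout << Dest[2] << '\n'; // 3
    }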
+ if (!(S.Current->getFunction() && + S.Current->getFunction()->isLambdaStaticInvoker() && + Func->isLambdaCallOperator())) { + if (!CheckInvoke(S, OpPC, ThisPtr)) + return false; + } + + if (S.checkingPotentialConstantExpression()) + return false; + } + + if (!CheckCallable(S, OpPC, Func)) + return false; + + if (!CheckCallDepth(S, OpPC)) + return false; + + auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC, VarArgSize); + InterpFrame *FrameBefore = S.Current; + S.Current = NewFrame.get(); + + APValue CallResult; + // Note that we cannot assert(CallResult.hasValue()) here since + // Ret() above only sets the APValue if the curent frame doesn't + // have a caller set. + if (Interpret(S, CallResult)) { + NewFrame.release(); // Frame was delete'd already. + assert(S.Current == FrameBefore); + return true; + } + + // Interpreting the function failed somehow. Reset to + // previous state. + S.Current = FrameBefore; + return false; + + return false; +} + +inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func, + uint32_t VarArgSize) { + if (Func->hasThisPointer()) { + size_t ArgSize = Func->getArgSize() + VarArgSize; + size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0); + + const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset); + + // If the current function is a lambda static invoker and + // the function we're about to call is a lambda call operator, + // skip the CheckInvoke, since the ThisPtr is a null pointer + // anyway. + if (!(S.Current->getFunction() && + S.Current->getFunction()->isLambdaStaticInvoker() && + Func->isLambdaCallOperator())) { + if (!CheckInvoke(S, OpPC, ThisPtr)) + return false; + } + } + + if (!CheckCallable(S, OpPC, Func)) + return false; + + if (Func->hasThisPointer() && S.checkingPotentialConstantExpression()) + return false; + + if (!CheckCallDepth(S, OpPC)) + return false; + + auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC, VarArgSize); + InterpFrame *FrameBefore = S.Current; + S.Current = NewFrame.get(); + + APValue CallResult; + // Note that we cannot assert(CallResult.hasValue()) here since + // Ret() above only sets the APValue if the curent frame doesn't + // have a caller set. + if (Interpret(S, CallResult)) { + NewFrame.release(); // Frame was delete'd already. + assert(S.Current == FrameBefore); + return true; + } + + // Interpreting the function failed somehow. Reset to + // previous state. + S.Current = FrameBefore; + return false; +} + +inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func, + uint32_t VarArgSize) { + assert(Func->hasThisPointer()); + assert(Func->isVirtual()); + size_t ArgSize = Func->getArgSize() + VarArgSize; + size_t ThisOffset = ArgSize - (Func->hasRVO() ? primSize(PT_Ptr) : 0); + Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset); + + QualType DynamicType = ThisPtr.getDeclDesc()->getType(); + const CXXRecordDecl *DynamicDecl; + if (DynamicType->isPointerType() || DynamicType->isReferenceType()) + DynamicDecl = DynamicType->getPointeeCXXRecordDecl(); + else + DynamicDecl = ThisPtr.getDeclDesc()->getType()->getAsCXXRecordDecl(); + const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl()); + const auto *InitialFunction = cast<CXXMethodDecl>(Func->getDecl()); + const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction( + DynamicDecl, StaticDecl, InitialFunction); + + if (Overrider != InitialFunction) { + // DR1872: An instantiated virtual constexpr function can't be called in a + // constant expression (prior to C++20). 
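Call and CallVar share one frame protocol: make a new frame current, interpret the callee, and roll back to the previous frame if interpretation fails. A toy sketch of that protocol (ToyFrame, interpretBody and call are invented names; the real code uses InterpFrame and Interpret, and the frame deletes itself in Ret on success):

    #include <iostream>
    #include <memory>

    struct ToyFrame { ToyFrame *Caller; };

    static ToyFrame *Current = nullptr;

    static bool interpretBody() { return true; } // stand-in for Interpret()

    static bool call() {
      auto NewFrame = std::make_unique<ToyFrame>(ToyFrame{Current});
      ToyFrame *FrameBefore = Current;
      Current = NewFrame.get();
      if (interpretBody()) {
        // In the real interpreter Ret() has already deleted the frame and
        // restored S.Current; the sketch lets unique_ptr clean up instead.
        Current = FrameBefore;
        return true;
      }
      Current = FrameBefore; // failure: restore the previous state
      return false;
    }

    int main() { std::cout << call() << '\n'; }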
We can still constant-fold such a + // call. + if (!S.getLangOpts().CPlusPlus20 && Overrider->isVirtual()) { + const Expr *E = S.Current->getExpr(OpPC); + S.CCEDiag(E, diag::note_constexpr_virtual_call) << E->getSourceRange(); + } + + Func = S.getContext().getOrCreateFunction(Overrider); + + const CXXRecordDecl *ThisFieldDecl = + ThisPtr.getFieldDesc()->getType()->getAsCXXRecordDecl(); + if (Func->getParentDecl()->isDerivedFrom(ThisFieldDecl)) { + // If the function we call is further DOWN the hierarchy than the + // FieldDesc of our pointer, just get the DeclDesc instead, which + // is the furthest we might go up in the hierarchy. + ThisPtr = ThisPtr.getDeclPtr(); + } + } + + return Call(S, OpPC, Func, VarArgSize); +} + +inline bool CallBI(InterpState &S, CodePtr &PC, const Function *Func, + const CallExpr *CE) { + auto NewFrame = std::make_unique<InterpFrame>(S, Func, PC); + + InterpFrame *FrameBefore = S.Current; + S.Current = NewFrame.get(); + + if (InterpretBuiltin(S, PC, Func, CE)) { + NewFrame.release(); + return true; + } + S.Current = FrameBefore; + return false; +} + +inline bool CallPtr(InterpState &S, CodePtr OpPC, uint32_t ArgSize, + const CallExpr *CE) { + const FunctionPointer &FuncPtr = S.Stk.pop<FunctionPointer>(); + + const Function *F = FuncPtr.getFunction(); + if (!F) { + const Expr *E = S.Current->getExpr(OpPC); + S.FFDiag(E, diag::note_constexpr_null_callee) + << const_cast<Expr *>(E) << E->getSourceRange(); + return false; + } + + if (!FuncPtr.isValid()) + return false; + + assert(F); + + // This happens when the call expression has been cast to + // something else, but we don't support that. + if (S.Ctx.classify(F->getDecl()->getReturnType()) != + S.Ctx.classify(CE->getType())) + return false; + + // Check argument nullability state. + if (F->hasNonNullAttr()) { + if (!CheckNonNullArgs(S, OpPC, F, CE, ArgSize)) + return false; + } + + assert(ArgSize >= F->getWrittenArgSize()); + uint32_t VarArgSize = ArgSize - F->getWrittenArgSize(); + + // We need to do this explicitly here since we don't have the necessary + // information to do it automatically. + if (F->isThisPointerExplicit()) + VarArgSize -= align(primSize(PT_Ptr)); + + if (F->isVirtual()) + return CallVirt(S, OpPC, F, VarArgSize); + + return Call(S, OpPC, F, VarArgSize); +} + +inline bool GetFnPtr(InterpState &S, CodePtr OpPC, const Function *Func) { + assert(Func); + S.Stk.push<FunctionPointer>(Func); + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool GetIntPtr(InterpState &S, CodePtr OpPC, const Descriptor *Desc) { + const T &IntVal = S.Stk.pop<T>(); + + S.Stk.push<Pointer>(static_cast<uint64_t>(IntVal), Desc); + return true; +} + +inline bool GetMemberPtr(InterpState &S, CodePtr OpPC, const Decl *D) { + S.Stk.push<MemberPointer>(D); + return true; +} + +inline bool GetMemberPtrBase(InterpState &S, CodePtr OpPC) { + const auto &MP = S.Stk.pop<MemberPointer>(); + + S.Stk.push<Pointer>(MP.getBase()); + return true; +} + +inline bool GetMemberPtrDecl(InterpState &S, CodePtr OpPC) { + const auto &MP = S.Stk.pop<MemberPointer>(); + + const auto *FD = cast<FunctionDecl>(MP.getDecl()); + const auto *Func = S.getContext().getOrCreateFunction(FD); + + S.Stk.push<FunctionPointer>(Func); + return true; +} + +/// Just emit a diagnostic. The expression that caused emission of this +/// op is not valid in a constant context. 
+inline bool Invalid(InterpState &S, CodePtr OpPC) { + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr) + << S.Current->getRange(OpPC); + return false; +} + +inline bool Unsupported(InterpState &S, CodePtr OpPC) { + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.FFDiag(Loc, diag::note_constexpr_stmt_expr_unsupported) + << S.Current->getRange(OpPC); + return false; +} + +/// Do nothing and just abort execution. +inline bool Error(InterpState &S, CodePtr OpPC) { return false; } + +/// Same here, but only for casts. +inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind) { + const SourceLocation &Loc = S.Current->getLocation(OpPC); + + // FIXME: Support diagnosing other invalid cast kinds. + if (Kind == CastKind::Reinterpret) + S.FFDiag(Loc, diag::note_constexpr_invalid_cast) + << static_cast<unsigned>(Kind) << S.Current->getRange(OpPC); + return false; +} + +inline bool InvalidDeclRef(InterpState &S, CodePtr OpPC, + const DeclRefExpr *DR) { + assert(DR); + return CheckDeclRef(S, OpPC, DR); +} + +inline bool SizelessVectorElementSize(InterpState &S, CodePtr OpPC) { + if (S.inConstantContext()) { + const SourceRange &ArgRange = S.Current->getRange(OpPC); + const Expr *E = S.Current->getExpr(OpPC); + S.CCEDiag(E, diag::note_constexpr_non_const_vectorelements) << ArgRange; + } + return false; +} + +inline bool Assume(InterpState &S, CodePtr OpPC) { + const auto Val = S.Stk.pop<Boolean>(); + + if (Val) + return true; + + // Else, diagnose. + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.CCEDiag(Loc, diag::note_constexpr_assumption_failed); + return false; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool OffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E) { + llvm::SmallVector<int64_t> ArrayIndices; + for (size_t I = 0; I != E->getNumExpressions(); ++I) + ArrayIndices.emplace_back(S.Stk.pop<int64_t>()); + + int64_t Result; + if (!InterpretOffsetOf(S, OpPC, E, ArrayIndices, Result)) + return false; + + S.Stk.push<T>(T::from(Result)); + + return true; +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool CheckNonNullArg(InterpState &S, CodePtr OpPC) { + const T &Arg = S.Stk.peek<T>(); + if (!Arg.isZero()) + return true; + + const SourceLocation &Loc = S.Current->getLocation(OpPC); + S.CCEDiag(Loc, diag::note_non_null_attribute_failed); + + return false; +} + +void diagnoseEnumValue(InterpState &S, CodePtr OpPC, const EnumDecl *ED, + const APSInt &Value); + +template <PrimType Name, class T = typename PrimConv<Name>::T> +inline bool CheckEnumValue(InterpState &S, CodePtr OpPC, const EnumDecl *ED) { + assert(ED); + assert(!ED->isFixed()); + const APSInt Val = S.Stk.peek<T>().toAPSInt(); + + if (S.inConstantContext()) + diagnoseEnumValue(S, OpPC, ED, Val); + return true; +} + +/// OldPtr -> Integer -> NewPtr. 
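Assume only inspects the already-evaluated condition: a true value is a no-op, a false value emits note_constexpr_assumption_failed and stops the evaluation. Assuming this opcode is what the C++23 [[assume]] attribute lowers to, the source-level effect looks roughly like the snippet below (clampPositive is an invented name; requires C++23):

    constexpr int clampPositive(int X) {
      [[assume(X > 0)]]; // a failed assumption is not a constant expression
      return X;
    }
    static_assert(clampPositive(3) == 3);     // assumption holds
    // constexpr int Bad = clampPositive(-1); // would be diagnosed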
+template <PrimType TIn, PrimType TOut> +inline bool DecayPtr(InterpState &S, CodePtr OpPC) { + static_assert(isPtrType(TIn) && isPtrType(TOut)); + using FromT = typename PrimConv<TIn>::T; + using ToT = typename PrimConv<TOut>::T; + + const FromT &OldPtr = S.Stk.pop<FromT>(); + S.Stk.push<ToT>(ToT(OldPtr.getIntegerRepresentation(), nullptr)); + return true; +} + +inline bool CheckDecl(InterpState &S, CodePtr OpPC, const VarDecl *VD) { + // An expression E is a core constant expression unless the evaluation of E + // would evaluate one of the following: [C++23] - a control flow that passes + // through a declaration of a variable with static or thread storage duration + // unless that variable is usable in constant expressions. + assert(VD->isLocalVarDecl() && + VD->isStaticLocal()); // Checked before emitting this. + + if (VD == S.EvaluatingDecl) + return true; + + if (!VD->isUsableInConstantExpressions(S.getCtx())) { + S.CCEDiag(VD->getLocation(), diag::note_constexpr_static_local) + << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD; + return false; + } + return true; +} + +inline bool Alloc(InterpState &S, CodePtr OpPC, const Descriptor *Desc) { + assert(Desc); + + if (!CheckDynamicMemoryAllocation(S, OpPC)) + return false; + + DynamicAllocator &Allocator = S.getAllocator(); + Block *B = Allocator.allocate(Desc, S.Ctx.getEvalID()); + assert(B); + + S.Stk.push<Pointer>(B, sizeof(InlineDescriptor)); + + return true; +} + +template <PrimType Name, class SizeT = typename PrimConv<Name>::T> +inline bool AllocN(InterpState &S, CodePtr OpPC, PrimType T, const Expr *Source, + bool IsNoThrow) { + if (!CheckDynamicMemoryAllocation(S, OpPC)) + return false; + + SizeT NumElements = S.Stk.pop<SizeT>(); + if (!CheckArraySize(S, OpPC, &NumElements, primSize(T), IsNoThrow)) { + if (!IsNoThrow) + return false; + + // If this failed and is nothrow, just return a null ptr. + S.Stk.push<Pointer>(0, nullptr); + return true; + } + + DynamicAllocator &Allocator = S.getAllocator(); + Block *B = Allocator.allocate(Source, T, static_cast<size_t>(NumElements), + S.Ctx.getEvalID()); + assert(B); + S.Stk.push<Pointer>(B, sizeof(InlineDescriptor)); + + return true; +} + +template <PrimType Name, class SizeT = typename PrimConv<Name>::T> +inline bool AllocCN(InterpState &S, CodePtr OpPC, const Descriptor *ElementDesc, + bool IsNoThrow) { + if (!CheckDynamicMemoryAllocation(S, OpPC)) + return false; + + SizeT NumElements = S.Stk.pop<SizeT>(); + if (!CheckArraySize(S, OpPC, &NumElements, ElementDesc->getSize(), + IsNoThrow)) { + if (!IsNoThrow) + return false; + + // If this failed and is nothrow, just return a null ptr. + S.Stk.push<Pointer>(0, ElementDesc); + return true; + } + + DynamicAllocator &Allocator = S.getAllocator(); + Block *B = Allocator.allocate(ElementDesc, static_cast<size_t>(NumElements), + S.Ctx.getEvalID()); + assert(B); + + S.Stk.push<Pointer>(B, sizeof(InlineDescriptor)); + + return true; +} + +bool RunDestructors(InterpState &S, CodePtr OpPC, const Block *B); +static inline bool Free(InterpState &S, CodePtr OpPC, bool DeleteIsArrayForm) { + if (!CheckDynamicMemoryAllocation(S, OpPC)) + return false; + + const Expr *Source = nullptr; + const Block *BlockToDelete = nullptr; + { + // Extra scope for this so the block doesn't have this pointer + // pointing to it when we destroy it. + const Pointer &Ptr = S.Stk.pop<Pointer>(); + + // Deleteing nullptr is always fine. 
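Alloc, AllocN and AllocCN back C++20 constexpr dynamic allocation; Free below then requires that every allocation made during the evaluation is released again with the matching delete form. A small source-level example of the behavior being implemented (sumFirst is an invented name; requires C++20):

    constexpr int sumFirst(unsigned N) {
      int *A = new int[N];            // handled by AllocN during evaluation
      int S = 0;
      for (unsigned I = 0; I != N; ++I) {
        A[I] = static_cast<int>(I);
        S += A[I];
      }
      delete[] A;                     // must be the array form; a mismatch or
      return S;                       // a leak makes this non-constant
    }
    static_assert(sumFirst(4) == 6);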
+ if (Ptr.isZero()) + return true; + + if (!Ptr.isRoot() || Ptr.isOnePastEnd() || Ptr.isArrayElement()) { + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_delete_subobject) + << Ptr.toDiagnosticString(S.getCtx()) << Ptr.isOnePastEnd(); + return false; + } + + Source = Ptr.getDeclDesc()->asExpr(); + BlockToDelete = Ptr.block(); + + if (!CheckDeleteSource(S, OpPC, Source, Ptr)) + return false; + } + assert(Source); + assert(BlockToDelete); + + // Invoke destructors before deallocating the memory. + if (!RunDestructors(S, OpPC, BlockToDelete)) + return false; + + DynamicAllocator &Allocator = S.getAllocator(); + bool WasArrayAlloc = Allocator.isArrayAllocation(Source); + const Descriptor *BlockDesc = BlockToDelete->getDescriptor(); + + if (!Allocator.deallocate(Source, BlockToDelete, S)) { + // Nothing has been deallocated, this must be a double-delete. + const SourceInfo &Loc = S.Current->getSource(OpPC); + S.FFDiag(Loc, diag::note_constexpr_double_delete); + return false; + } + return CheckNewDeleteForms(S, OpPC, WasArrayAlloc, DeleteIsArrayForm, + BlockDesc, Source); +} + +//===----------------------------------------------------------------------===// +// Read opcode arguments +//===----------------------------------------------------------------------===// + +template <typename T> inline T ReadArg(InterpState &S, CodePtr &OpPC) { + if constexpr (std::is_pointer<T>::value) { + uint32_t ID = OpPC.read<uint32_t>(); + return reinterpret_cast<T>(S.P.getNativePointer(ID)); + } else { + return OpPC.read<T>(); + } +} + +template <> inline Floating ReadArg<Floating>(InterpState &S, CodePtr &OpPC) { + Floating F = Floating::deserialize(*OpPC); + OpPC += align(F.bytesToSerialize()); + return F; +} + +template <> +inline IntegralAP<false> ReadArg<IntegralAP<false>>(InterpState &S, + CodePtr &OpPC) { + IntegralAP<false> I = IntegralAP<false>::deserialize(*OpPC); + OpPC += align(I.bytesToSerialize()); + return I; +} + +template <> +inline IntegralAP<true> ReadArg<IntegralAP<true>>(InterpState &S, + CodePtr &OpPC) { + IntegralAP<true> I = IntegralAP<true>::deserialize(*OpPC); + OpPC += align(I.bytesToSerialize()); + return I; +} } // namespace interp } // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp index ed6e8910194d..5ac778aeb607 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp @@ -16,11 +16,16 @@ using namespace clang; using namespace clang::interp; - - void Block::addPointer(Pointer *P) { - if (IsStatic) + assert(P); + if (IsStatic) { + assert(!Pointers); return; + } + +#ifndef NDEBUG + assert(!hasPointer(P)); +#endif if (Pointers) Pointers->Prev = P; P->Next = Pointers; @@ -29,10 +34,19 @@ void Block::addPointer(Pointer *P) { } void Block::removePointer(Pointer *P) { - if (IsStatic) + assert(P); + if (IsStatic) { + assert(!Pointers); return; + } + +#ifndef NDEBUG + assert(hasPointer(P)); +#endif + if (Pointers == P) Pointers = P->Next; + if (P->Prev) P->Prev->Next = P->Next; if (P->Next) @@ -44,24 +58,42 @@ void Block::cleanup() { (reinterpret_cast<DeadBlock *>(this + 1) - 1)->free(); } -void Block::movePointer(Pointer *From, Pointer *To) { - if (IsStatic) +void Block::replacePointer(Pointer *Old, Pointer *New) { + assert(Old); + assert(New); + if (IsStatic) { + assert(!Pointers); return; - To->Prev = From->Prev; - if (To->Prev) - To->Prev->Next = To; - To->Next = 
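Block::addPointer and Block::removePointer maintain an intrusive doubly-linked list of all live Pointers into the block, headed by Block::Pointers, so both operations are O(1). A minimal sketch of that list discipline, with invented Node/Chain names:

    #include <cassert>

    struct Node { Node *Prev = nullptr, *Next = nullptr; };

    struct Chain {
      Node *Head = nullptr;                 // plays the role of Block::Pointers
      void add(Node *P) {
        if (Head) Head->Prev = P;
        P->Next = Head;
        P->Prev = nullptr;
        Head = P;
      }
      void remove(Node *P) {
        if (Head == P) Head = P->Next;
        if (P->Prev) P->Prev->Next = P->Next;
        if (P->Next) P->Next->Prev = P->Prev;
        P->Prev = P->Next = nullptr;
      }
    };

    int main() {
      Chain C; Node A, B;
      C.add(&A); C.add(&B);
      C.remove(&A);
      assert(C.Head == &B && B.Next == nullptr);
    }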
From->Next; - if (To->Next) - To->Next->Prev = To; - if (Pointers == From) - Pointers = To; - - From->Prev = nullptr; - From->Next = nullptr; + } + +#ifndef NDEBUG + assert(hasPointer(Old)); +#endif + + removePointer(Old); + addPointer(New); + + Old->PointeeStorage.BS.Pointee = nullptr; + +#ifndef NDEBUG + assert(!hasPointer(Old)); + assert(hasPointer(New)); +#endif } +#ifndef NDEBUG +bool Block::hasPointer(const Pointer *P) const { + for (const Pointer *C = Pointers; C; C = C->Next) { + if (C == P) + return true; + } + return false; +} +#endif + DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk) - : Root(Root), B(Blk->Desc, Blk->IsStatic, Blk->IsExtern, /*isDead=*/true) { + : Root(Root), + B(~0u, Blk->Desc, Blk->IsStatic, Blk->IsExtern, /*isDead=*/true) { // Add the block to the chain of dead blocks. if (Root) Root->Prev = this; @@ -73,15 +105,19 @@ DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk) // Transfer pointers. B.Pointers = Blk->Pointers; for (Pointer *P = Blk->Pointers; P; P = P->Next) - P->Pointee = &B; + P->PointeeStorage.BS.Pointee = &B; + Blk->Pointers = nullptr; } void DeadBlock::free() { + if (B.IsInitialized) + B.invokeDtor(); + if (Prev) Prev->Next = Next; if (Next) Next->Prev = Prev; if (Root == this) Root = Next; - ::free(this); + std::free(this); } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h index 0ccdef221c83..3760ded7b13f 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h @@ -25,28 +25,46 @@ namespace clang { namespace interp { class Block; class DeadBlock; -class Context; class InterpState; class Pointer; -class Function; enum PrimType : unsigned; /// A memory block, either on the stack or in the heap. /// -/// The storage described by the block immediately follows it in memory. -class Block { +/// The storage described by the block is immediately followed by +/// optional metadata, which is followed by the actual data. +/// +/// Block* rawData() data() +/// │ │ │ +/// │ │ │ +/// ▼ ▼ ▼ +/// ┌───────────────┬─────────────────────────┬─────────────────┐ +/// │ Block │ Metadata │ Data │ +/// │ sizeof(Block) │ Desc->getMetadataSize() │ Desc->getSize() │ +/// └───────────────┴─────────────────────────┴─────────────────┘ +/// +/// Desc->getAllocSize() describes the size after the Block, i.e. +/// the data size and the metadata size. +/// +class Block final { public: - // Creates a new block. - Block(const llvm::Optional<unsigned> &DeclID, Descriptor *Desc, - bool IsStatic = false, bool IsExtern = false) - : DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern), Desc(Desc) {} + /// Creates a new block. + Block(unsigned EvalID, const std::optional<unsigned> &DeclID, + const Descriptor *Desc, bool IsStatic = false, bool IsExtern = false) + : EvalID(EvalID), DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern), + IsDynamic(false), Desc(Desc) { + assert(Desc); + } - Block(Descriptor *Desc, bool IsStatic = false, bool IsExtern = false) - : DeclID((unsigned)-1), IsStatic(IsStatic), IsExtern(IsExtern), - Desc(Desc) {} + Block(unsigned EvalID, const Descriptor *Desc, bool IsStatic = false, + bool IsExtern = false) + : EvalID(EvalID), DeclID((unsigned)-1), IsStatic(IsStatic), + IsExtern(IsExtern), IsDynamic(false), Desc(Desc) { + assert(Desc); + } /// Returns the block's descriptor. 
- Descriptor *getDescriptor() const { return Desc; } + const Descriptor *getDescriptor() const { return Desc; } /// Checks if the block has any live pointers. bool hasPointers() const { return Pointers; } /// Checks if the block is extern. @@ -55,67 +73,118 @@ public: bool isStatic() const { return IsStatic; } /// Checks if the block is temporary. bool isTemporary() const { return Desc->IsTemporary; } + bool isDynamic() const { return IsDynamic; } /// Returns the size of the block. - InterpSize getSize() const { return Desc->getAllocSize(); } + unsigned getSize() const { return Desc->getAllocSize(); } /// Returns the declaration ID. - llvm::Optional<unsigned> getDeclID() const { return DeclID; } + std::optional<unsigned> getDeclID() const { return DeclID; } + /// Returns whether the data of this block has been initialized via + /// invoking the Ctor func. + bool isInitialized() const { return IsInitialized; } + /// The Evaluation ID this block was created in. + unsigned getEvalID() const { return EvalID; } /// Returns a pointer to the stored data. - char *data() { return reinterpret_cast<char *>(this + 1); } + /// You are allowed to read Desc->getSize() bytes from this address. + std::byte *data() { + // rawData might contain metadata as well. + size_t DataOffset = Desc->getMetadataSize(); + return rawData() + DataOffset; + } + const std::byte *data() const { + // rawData might contain metadata as well. + size_t DataOffset = Desc->getMetadataSize(); + return rawData() + DataOffset; + } - /// Returns a view over the data. - template <typename T> - T &deref() { return *reinterpret_cast<T *>(data()); } + /// Returns a pointer to the raw data, including metadata. + /// You are allowed to read Desc->getAllocSize() bytes from this address. + std::byte *rawData() { + return reinterpret_cast<std::byte *>(this) + sizeof(Block); + } + const std::byte *rawData() const { + return reinterpret_cast<const std::byte *>(this) + sizeof(Block); + } /// Invokes the constructor. void invokeCtor() { - std::memset(data(), 0, getSize()); + assert(!IsInitialized); + std::memset(rawData(), 0, Desc->getAllocSize()); if (Desc->CtorFn) Desc->CtorFn(this, data(), Desc->IsConst, Desc->IsMutable, /*isActive=*/true, Desc); + IsInitialized = true; + } + + /// Invokes the Destructor. + void invokeDtor() { + assert(IsInitialized); + if (Desc->DtorFn) + Desc->DtorFn(this, data(), Desc); + IsInitialized = false; } -protected: + void dump() const { dump(llvm::errs()); } + void dump(llvm::raw_ostream &OS) const; + +private: friend class Pointer; friend class DeadBlock; friend class InterpState; + friend class DynamicAllocator; - Block(Descriptor *Desc, bool IsExtern, bool IsStatic, bool IsDead) - : IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true), Desc(Desc) {} + Block(unsigned EvalID, const Descriptor *Desc, bool IsExtern, bool IsStatic, + bool IsDead) + : EvalID(EvalID), IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true), + IsDynamic(false), Desc(Desc) { + assert(Desc); + } - // Deletes a dead block at the end of its lifetime. + /// Deletes a dead block at the end of its lifetime. void cleanup(); - // Pointer chain management. + /// Pointer chain management. void addPointer(Pointer *P); void removePointer(Pointer *P); - void movePointer(Pointer *From, Pointer *To); + void replacePointer(Pointer *Old, Pointer *New); +#ifndef NDEBUG + bool hasPointer(const Pointer *P) const; +#endif + const unsigned EvalID = ~0u; /// Start of the chain of pointers. Pointer *Pointers = nullptr; /// Unique identifier of the declaration. 
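The layout comment above (Block, then metadata, then data) is what data() and rawData() encode: rawData() starts right after the Block object, and data() skips the descriptor's metadata. A toy sketch with a fixed metadata size (ToyBlock is an invented stand-in, not the real Block/Descriptor API):

    #include <cstddef>
    #include <iostream>
    #include <new>

    struct ToyBlock {
      std::size_t MetadataSize; // stands in for Desc->getMetadataSize()
      std::byte *rawData() {
        return reinterpret_cast<std::byte *>(this) + sizeof(ToyBlock);
      }
      std::byte *data() { return rawData() + MetadataSize; }
    };

    int main() {
      alignas(ToyBlock) unsigned char Storage[sizeof(ToyBlock) + 64] = {};
      auto *B = new (Storage) ToyBlock{16};
      std::cout << (B->data() - B->rawData()) << '\n'; // 16 bytes of metadata
    }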
- llvm::Optional<unsigned> DeclID; + std::optional<unsigned> DeclID; /// Flag indicating if the block has static storage duration. bool IsStatic = false; /// Flag indicating if the block is an extern. bool IsExtern = false; - /// Flag indicating if the pointer is dead. + /// Flag indicating if the pointer is dead. This is only ever + /// set once, when converting the Block to a DeadBlock. bool IsDead = false; + /// Flag indicating if the block contents have been initialized + /// via invokeCtor. + bool IsInitialized = false; + /// Flag indicating if this block has been allocated via dynamic + /// memory allocation (e.g. malloc). + bool IsDynamic = false; /// Pointer to the stack slot descriptor. - Descriptor *Desc; + const Descriptor *Desc; }; /// Descriptor for a dead block. /// /// Dead blocks are chained in a double-linked list to deallocate them /// whenever pointers become dead. -class DeadBlock { +class DeadBlock final { public: /// Copies the block. DeadBlock(DeadBlock *&Root, Block *Blk); /// Returns a pointer to the stored data. - char *data() { return B.data(); } + std::byte *data() { return B.data(); } + std::byte *rawData() { return B.rawData(); } private: friend class Block; diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp new file mode 100644 index 000000000000..98928b3c22d7 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp @@ -0,0 +1,1593 @@ +//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +#include "../ExprConstShared.h" +#include "Boolean.h" +#include "Interp.h" +#include "PrimType.h" +#include "clang/AST/OSLog.h" +#include "clang/AST/RecordLayout.h" +#include "clang/Basic/Builtins.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/Support/SipHash.h" + +namespace clang { +namespace interp { + +static unsigned callArgSize(const InterpState &S, const CallExpr *C) { + unsigned O = 0; + + for (const Expr *E : C->arguments()) { + O += align(primSize(*S.getContext().classify(E))); + } + + return O; +} + +template <typename T> +static T getParam(const InterpFrame *Frame, unsigned Index) { + assert(Frame->getFunction()->getNumParams() > Index); + unsigned Offset = Frame->getFunction()->getParamOffset(Index); + return Frame->getParam<T>(Offset); +} + +PrimType getIntPrimType(const InterpState &S) { + const TargetInfo &TI = S.getCtx().getTargetInfo(); + unsigned IntWidth = TI.getIntWidth(); + + if (IntWidth == 32) + return PT_Sint32; + else if (IntWidth == 16) + return PT_Sint16; + llvm_unreachable("Int isn't 16 or 32 bit?"); +} + +PrimType getLongPrimType(const InterpState &S) { + const TargetInfo &TI = S.getCtx().getTargetInfo(); + unsigned LongWidth = TI.getLongWidth(); + + if (LongWidth == 64) + return PT_Sint64; + else if (LongWidth == 32) + return PT_Sint32; + else if (LongWidth == 16) + return PT_Sint16; + llvm_unreachable("long isn't 16, 32 or 64 bit?"); +} + +/// Peek an integer value from the stack into an APSInt. 
+static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) { + if (Offset == 0) + Offset = align(primSize(T)); + + APSInt R; + INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt()); + + return R; +} + +/// Pushes \p Val on the stack as the type given by \p QT. +static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) { + assert(QT->isSignedIntegerOrEnumerationType() || + QT->isUnsignedIntegerOrEnumerationType()); + std::optional<PrimType> T = S.getContext().classify(QT); + assert(T); + + if (QT->isSignedIntegerOrEnumerationType()) { + int64_t V = Val.getSExtValue(); + INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); }); + } else { + assert(QT->isUnsignedIntegerOrEnumerationType()); + uint64_t V = Val.getZExtValue(); + INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V)); }); + } +} + +template <typename T> +static void pushInteger(InterpState &S, T Val, QualType QT) { + if constexpr (std::is_same_v<T, APInt>) + pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT); + else + pushInteger(S, + APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val), + std::is_signed_v<T>), + !std::is_signed_v<T>), + QT); +} + +static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) { + INT_TYPE_SWITCH_NO_BOOL( + ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); }); +} + +static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result, + std::optional<PrimType> &T) { + if (!T) + return RetVoid(S, OpPC, Result); + +#define RET_CASE(X) \ + case X: \ + return Ret<X>(S, OpPC, Result); + switch (*T) { + RET_CASE(PT_Ptr); + RET_CASE(PT_FnPtr); + RET_CASE(PT_Float); + RET_CASE(PT_Bool); + RET_CASE(PT_Sint8); + RET_CASE(PT_Uint8); + RET_CASE(PT_Sint16); + RET_CASE(PT_Uint16); + RET_CASE(PT_Sint32); + RET_CASE(PT_Uint32); + RET_CASE(PT_Sint64); + RET_CASE(PT_Uint64); + default: + llvm_unreachable("Unsupported return type for builtin function"); + } +#undef RET_CASE +} + +static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const CallExpr *Call) { + // The current frame is the one for __builtin_is_constant_evaluated. + // The one above that, potentially the one for std::is_constant_evaluated(). 
+ if (S.inConstantContext() && !S.checkingPotentialConstantExpression() && + Frame->Caller && S.getEvalStatus().Diag) { + auto isStdCall = [](const FunctionDecl *F) -> bool { + return F && F->isInStdNamespace() && F->getIdentifier() && + F->getIdentifier()->isStr("is_constant_evaluated"); + }; + const InterpFrame *Caller = Frame->Caller; + + if (Caller->Caller && isStdCall(Caller->getCallee())) { + const Expr *E = Caller->Caller->getExpr(Caller->getRetPC()); + S.report(E->getExprLoc(), + diag::warn_is_constant_evaluated_always_true_constexpr) + << "std::is_constant_evaluated" << E->getSourceRange(); + } else { + const Expr *E = Frame->Caller->getExpr(Frame->getRetPC()); + S.report(E->getExprLoc(), + diag::warn_is_constant_evaluated_always_true_constexpr) + << "__builtin_is_constant_evaluated" << E->getSourceRange(); + } + } + + S.Stk.push<Boolean>(Boolean::from(S.inConstantContext())); + return true; +} + +static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const CallExpr *Call) { + const Pointer &A = getParam<Pointer>(Frame, 0); + const Pointer &B = getParam<Pointer>(Frame, 1); + + if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read)) + return false; + + if (A.isDummy() || B.isDummy()) + return false; + + assert(A.getFieldDesc()->isPrimitiveArray()); + assert(B.getFieldDesc()->isPrimitiveArray()); + + unsigned IndexA = A.getIndex(); + unsigned IndexB = B.getIndex(); + int32_t Result = 0; + for (;; ++IndexA, ++IndexB) { + const Pointer &PA = A.atIndex(IndexA); + const Pointer &PB = B.atIndex(IndexB); + if (!CheckRange(S, OpPC, PA, AK_Read) || + !CheckRange(S, OpPC, PB, AK_Read)) { + return false; + } + uint8_t CA = PA.deref<uint8_t>(); + uint8_t CB = PB.deref<uint8_t>(); + + if (CA > CB) { + Result = 1; + break; + } else if (CA < CB) { + Result = -1; + break; + } + if (CA == 0 || CB == 0) + break; + } + + pushInteger(S, Result, Call->getType()); + return true; +} + +static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const CallExpr *Call) { + const Pointer &StrPtr = getParam<Pointer>(Frame, 0); + + if (!CheckArray(S, OpPC, StrPtr)) + return false; + + if (!CheckLive(S, OpPC, StrPtr, AK_Read)) + return false; + + if (!CheckDummy(S, OpPC, StrPtr, AK_Read)) + return false; + + assert(StrPtr.getFieldDesc()->isPrimitiveArray()); + + size_t Len = 0; + for (size_t I = StrPtr.getIndex();; ++I, ++Len) { + const Pointer &ElemPtr = StrPtr.atIndex(I); + + if (!CheckRange(S, OpPC, ElemPtr, AK_Read)) + return false; + + uint8_t Val = ElemPtr.deref<uint8_t>(); + if (Val == 0) + break; + } + + pushInteger(S, Len, Call->getType()); + + return true; +} + +static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *F, + bool Signaling) { + const Pointer &Arg = getParam<Pointer>(Frame, 0); + + if (!CheckLoad(S, OpPC, Arg)) + return false; + + assert(Arg.getFieldDesc()->isPrimitiveArray()); + + // Convert the given string to an integer using StringRef's API. + llvm::APInt Fill; + std::string Str; + assert(Arg.getNumElems() >= 1); + for (unsigned I = 0;; ++I) { + const Pointer &Elem = Arg.atIndex(I); + + if (!CheckLoad(S, OpPC, Elem)) + return false; + + if (Elem.deref<int8_t>() == 0) + break; + + Str += Elem.deref<char>(); + } + + // Treat empty strings as if they were zero. 
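interp__builtin_strcmp walks both arrays in lock-step, compares elements as unsigned chars, and stops at the first difference or at a terminating NUL; the bounds checking comes from CheckRange. The comparison itself reduces to the classic loop (toyStrcmp is an invented name):

    #include <iostream>

    static int toyStrcmp(const char *A, const char *B) {
      for (;; ++A, ++B) {
        unsigned char CA = *A, CB = *B;
        if (CA > CB) return 1;
        if (CA < CB) return -1;
        if (CA == 0) return 0; // CA == CB here, so both strings ended
      }
    }

    int main() {
      std::cout << toyStrcmp("abc", "abd") << ' ' << toyStrcmp("abc", "abc")
                << '\n'; // -1 0
    }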
+ if (Str.empty()) + Fill = llvm::APInt(32, 0); + else if (StringRef(Str).getAsInteger(0, Fill)) + return false; + + const llvm::fltSemantics &TargetSemantics = + S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType()); + + Floating Result; + if (S.getCtx().getTargetInfo().isNan2008()) { + if (Signaling) + Result = Floating( + llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill)); + else + Result = Floating( + llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill)); + } else { + // Prior to IEEE 754-2008, architectures were allowed to choose whether + // the first bit of their significand was set for qNaN or sNaN. MIPS chose + // a different encoding to what became a standard in 2008, and for pre- + // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as + // sNaN. This is now known as "legacy NaN" encoding. + if (Signaling) + Result = Floating( + llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill)); + else + Result = Floating( + llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill)); + } + + S.Stk.push<Floating>(Result); + return true; +} + +static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *F) { + const llvm::fltSemantics &TargetSemantics = + S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType()); + + S.Stk.push<Floating>(Floating::getInf(TargetSemantics)); + return true; +} + +static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *F) { + const Floating &Arg1 = getParam<Floating>(Frame, 0); + const Floating &Arg2 = getParam<Floating>(Frame, 1); + + APFloat Copy = Arg1.getAPFloat(); + Copy.copySign(Arg2.getAPFloat()); + S.Stk.push<Floating>(Floating(Copy)); + + return true; +} + +static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *F) { + const Floating &LHS = getParam<Floating>(Frame, 0); + const Floating &RHS = getParam<Floating>(Frame, 1); + + Floating Result; + + // When comparing zeroes, return -0.0 if one of the zeroes is negative. + if (LHS.isZero() && RHS.isZero() && RHS.isNegative()) + Result = RHS; + else if (LHS.isNan() || RHS < LHS) + Result = RHS; + else + Result = LHS; + + S.Stk.push<Floating>(Result); + return true; +} + +static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func) { + const Floating &LHS = getParam<Floating>(Frame, 0); + const Floating &RHS = getParam<Floating>(Frame, 1); + + Floating Result; + + // When comparing zeroes, return +0.0 if one of the zeroes is positive. + if (LHS.isZero() && RHS.isZero() && LHS.isNegative()) + Result = RHS; + else if (LHS.isNan() || RHS > LHS) + Result = RHS; + else + Result = LHS; + + S.Stk.push<Floating>(Result); + return true; +} + +/// Defined as __builtin_isnan(...), to accommodate the fact that it can +/// take a float, double, long double, etc. +/// But for us, that's all a Floating anyway. 
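The zero/NaN special cases in interp__builtin_fmax above are easy to miss: when both operands are zero, a negative zero on the left loses to the right operand, and a NaN on the left also yields the right operand. A plain double version of the same decision tree (toyFmax is an invented name):

    #include <cmath>
    #include <iostream>

    static double toyFmax(double L, double R) {
      if (L == 0.0 && R == 0.0 && std::signbit(L))
        return R;                      // prefer +0.0 over -0.0
      if (std::isnan(L) || R > L)
        return R;
      return L;
    }

    int main() {
      std::cout << std::signbit(toyFmax(-0.0, 0.0)) << '\n'; // 0, i.e. +0.0
      std::cout << toyFmax(std::nan(""), 2.0) << '\n';       // 2
    }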
+static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *F, + const CallExpr *Call) { + const Floating &Arg = S.Stk.peek<Floating>(); + + pushInteger(S, Arg.isNan(), Call->getType()); + return true; +} + +static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *F, + const CallExpr *Call) { + const Floating &Arg = S.Stk.peek<Floating>(); + + pushInteger(S, Arg.isSignaling(), Call->getType()); + return true; +} + +static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *F, + bool CheckSign, const CallExpr *Call) { + const Floating &Arg = S.Stk.peek<Floating>(); + bool IsInf = Arg.isInf(); + + if (CheckSign) + pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType()); + else + pushInteger(S, Arg.isInf(), Call->getType()); + return true; +} + +static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *F, const CallExpr *Call) { + const Floating &Arg = S.Stk.peek<Floating>(); + + pushInteger(S, Arg.isFinite(), Call->getType()); + return true; +} + +static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *F, const CallExpr *Call) { + const Floating &Arg = S.Stk.peek<Floating>(); + + pushInteger(S, Arg.isNormal(), Call->getType()); + return true; +} + +static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *F, + const CallExpr *Call) { + const Floating &Arg = S.Stk.peek<Floating>(); + + pushInteger(S, Arg.isDenormal(), Call->getType()); + return true; +} + +static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *F, + const CallExpr *Call) { + const Floating &Arg = S.Stk.peek<Floating>(); + + pushInteger(S, Arg.isZero(), Call->getType()); + return true; +} + +/// First parameter to __builtin_isfpclass is the floating value, the +/// second one is an integral value. +static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType()); + APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT); + const Floating &F = + S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float))); + + int32_t Result = + static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue()); + pushInteger(S, Result, Call->getType()); + + return true; +} + +/// Five int values followed by one floating value. +static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + const Floating &Val = S.Stk.peek<Floating>(); + + unsigned Index; + switch (Val.getCategory()) { + case APFloat::fcNaN: + Index = 0; + break; + case APFloat::fcInfinity: + Index = 1; + break; + case APFloat::fcNormal: + Index = Val.isDenormal() ? 3 : 2; + break; + case APFloat::fcZero: + Index = 4; + break; + } + + // The last argument is first on the stack. 
+ assert(Index <= 4); + unsigned IntSize = primSize(getIntPrimType(S)); + unsigned Offset = + align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize)); + + APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset); + pushInteger(S, I, Call->getType()); + return true; +} + +// The C standard says "fabs raises no floating-point exceptions, +// even if x is a signaling NaN. The returned value is independent of +// the current rounding direction mode." Therefore constant folding can +// proceed without regard to the floating point settings. +// Reference, WG14 N2478 F.10.4.3 +static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func) { + const Floating &Val = getParam<Floating>(Frame, 0); + + S.Stk.push<Floating>(Floating::abs(Val)); + return true; +} + +static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); + APSInt Val = peekToAPSInt(S.Stk, ArgT); + pushInteger(S, Val.popcount(), Call->getType()); + return true; +} + +static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, const CallExpr *Call) { + PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); + APSInt Val = peekToAPSInt(S.Stk, ArgT); + pushInteger(S, Val.popcount() % 2, Call->getType()); + return true; +} + +static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, const CallExpr *Call) { + PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); + APSInt Val = peekToAPSInt(S.Stk, ArgT); + pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType()); + return true; +} + +static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); + APSInt Val = peekToAPSInt(S.Stk, ArgT); + pushInteger(S, Val.reverseBits(), Call->getType()); + return true; +} + +static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + // This is an unevaluated call, so there are no arguments on the stack. + assert(Call->getNumArgs() == 1); + const Expr *Arg = Call->getArg(0); + + GCCTypeClass ResultClass = + EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts()); + int32_t ReturnVal = static_cast<int32_t>(ResultClass); + pushInteger(S, ReturnVal, Call->getType()); + return true; +} + +// __builtin_expect(long, long) +// __builtin_expect_with_probability(long, long, double) +static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, const CallExpr *Call) { + // The return value is simply the value of the first parameter. + // We ignore the probability. 
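The Index computed above selects one of the five integer arguments of __builtin_fpclassify (NaN, infinity, normal, subnormal, zero, in that order), which is why the offset arithmetic counts back from the floating value on top of the stack. The source-level behavior, for reference:

    #include <cmath>
    #include <iostream>

    int main() {
      double X = 0.0;
      // The five classification arguments map to Index 0..4 above; for
      // X == 0.0 the FP_ZERO argument is returned.
      std::cout << __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
                                        FP_SUBNORMAL, FP_ZERO, X)
                << '\n'; // prints the value of FP_ZERO
    }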
+ unsigned NumArgs = Call->getNumArgs(); + assert(NumArgs == 2 || NumArgs == 3); + + PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); + unsigned Offset = align(primSize(getLongPrimType(S))) * 2; + if (NumArgs == 3) + Offset += align(primSize(PT_Float)); + + APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset); + pushInteger(S, Val, Call->getType()); + return true; +} + +/// rotateleft(value, amount) +static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, const CallExpr *Call, + bool Right) { + PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType()); + PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType()); + + APSInt Amount = peekToAPSInt(S.Stk, AmountT); + APSInt Value = peekToAPSInt( + S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT))); + + APSInt Result; + if (Right) + Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())), + /*IsUnsigned=*/true); + else // Left. + Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())), + /*IsUnsigned=*/true); + + pushInteger(S, Result, Call->getType()); + return true; +} + +static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *Func, + const CallExpr *Call) { + PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); + APSInt Value = peekToAPSInt(S.Stk, ArgT); + + uint64_t N = Value.countr_zero(); + pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType()); + return true; +} + +static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + assert(Call->getArg(0)->isLValue()); + PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr); + + if (PtrT == PT_FnPtr) { + const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>(); + S.Stk.push<FunctionPointer>(Arg); + } else if (PtrT == PT_Ptr) { + const Pointer &Arg = S.Stk.peek<Pointer>(); + S.Stk.push<Pointer>(Arg); + } else { + assert(false && "Unsupported pointer type passed to __builtin_addressof()"); + } + return true; +} + +static bool interp__builtin_move(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *Func, + const CallExpr *Call) { + + PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr); + + TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg);); + + return Func->getDecl()->isConstexpr(); +} + +static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType()); + APSInt Arg = peekToAPSInt(S.Stk, ArgT); + + int Result = + S.getCtx().getTargetInfo().getEHDataRegisterNumber(Arg.getZExtValue()); + pushInteger(S, Result, Call->getType()); + return true; +} + +/// Just takes the first Argument to the call and puts it on the stack. 
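interp__builtin_rotate reduces the rotation amount modulo the value's bit width before calling APInt::rotl/rotr. The same arithmetic for a fixed 32-bit value (rotateLeft is an invented helper):

    #include <cstdint>
    #include <iostream>

    static std::uint32_t rotateLeft(std::uint32_t V, unsigned Amount) {
      Amount %= 32;                    // Amount.urem(Value.getBitWidth())
      if (Amount == 0)
        return V;
      return (V << Amount) | (V >> (32 - Amount));
    }

    int main() {
      std::cout << std::hex << rotateLeft(0x80000001u, 4) << '\n'; // 18
    }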
+static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, + const Function *Func, const CallExpr *Call) { + const Pointer &Arg = S.Stk.peek<Pointer>(); + S.Stk.push<Pointer>(Arg); + return true; +} + +// Two integral values followed by a pointer (lhs, rhs, resultOut) +static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + Pointer &ResultPtr = S.Stk.peek<Pointer>(); + if (ResultPtr.isDummy()) + return false; + + unsigned BuiltinOp = Func->getBuiltinID(); + PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType()); + PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType()); + APSInt RHS = peekToAPSInt(S.Stk, RHST, + align(primSize(PT_Ptr)) + align(primSize(RHST))); + APSInt LHS = peekToAPSInt(S.Stk, LHST, + align(primSize(PT_Ptr)) + align(primSize(RHST)) + + align(primSize(LHST))); + QualType ResultType = Call->getArg(2)->getType()->getPointeeType(); + PrimType ResultT = *S.getContext().classify(ResultType); + bool Overflow; + + APSInt Result; + if (BuiltinOp == Builtin::BI__builtin_add_overflow || + BuiltinOp == Builtin::BI__builtin_sub_overflow || + BuiltinOp == Builtin::BI__builtin_mul_overflow) { + bool IsSigned = LHS.isSigned() || RHS.isSigned() || + ResultType->isSignedIntegerOrEnumerationType(); + bool AllSigned = LHS.isSigned() && RHS.isSigned() && + ResultType->isSignedIntegerOrEnumerationType(); + uint64_t LHSSize = LHS.getBitWidth(); + uint64_t RHSSize = RHS.getBitWidth(); + uint64_t ResultSize = S.getCtx().getTypeSize(ResultType); + uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize); + + // Add an additional bit if the signedness isn't uniformly agreed to. We + // could do this ONLY if there is a signed and an unsigned that both have + // MaxBits, but the code to check that is pretty nasty. The issue will be + // caught in the shrink-to-result later anyway. + if (IsSigned && !AllSigned) + ++MaxBits; + + LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned); + RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned); + Result = APSInt(MaxBits, !IsSigned); + } + + // Find largest int. + switch (BuiltinOp) { + default: + llvm_unreachable("Invalid value for BuiltinOp"); + case Builtin::BI__builtin_add_overflow: + case Builtin::BI__builtin_sadd_overflow: + case Builtin::BI__builtin_saddl_overflow: + case Builtin::BI__builtin_saddll_overflow: + case Builtin::BI__builtin_uadd_overflow: + case Builtin::BI__builtin_uaddl_overflow: + case Builtin::BI__builtin_uaddll_overflow: + Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow) + : LHS.uadd_ov(RHS, Overflow); + break; + case Builtin::BI__builtin_sub_overflow: + case Builtin::BI__builtin_ssub_overflow: + case Builtin::BI__builtin_ssubl_overflow: + case Builtin::BI__builtin_ssubll_overflow: + case Builtin::BI__builtin_usub_overflow: + case Builtin::BI__builtin_usubl_overflow: + case Builtin::BI__builtin_usubll_overflow: + Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow) + : LHS.usub_ov(RHS, Overflow); + break; + case Builtin::BI__builtin_mul_overflow: + case Builtin::BI__builtin_smul_overflow: + case Builtin::BI__builtin_smull_overflow: + case Builtin::BI__builtin_smulll_overflow: + case Builtin::BI__builtin_umul_overflow: + case Builtin::BI__builtin_umull_overflow: + case Builtin::BI__builtin_umulll_overflow: + Result = LHS.isSigned() ? 
LHS.smul_ov(RHS, Overflow) + : LHS.umul_ov(RHS, Overflow); + break; + } + + // In the case where multiple sizes are allowed, truncate and see if + // the values are the same. + if (BuiltinOp == Builtin::BI__builtin_add_overflow || + BuiltinOp == Builtin::BI__builtin_sub_overflow || + BuiltinOp == Builtin::BI__builtin_mul_overflow) { + // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead, + // since it will give us the behavior of a TruncOrSelf in the case where + // its parameter <= its size. We previously set Result to be at least the + // type-size of the result, so getTypeSize(ResultType) <= Resu + APSInt Temp = Result.extOrTrunc(S.getCtx().getTypeSize(ResultType)); + Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType()); + + if (!APSInt::isSameValue(Temp, Result)) + Overflow = true; + Result = Temp; + } + + // Write Result to ResultPtr and put Overflow on the stacl. + assignInteger(ResultPtr, ResultT, Result); + ResultPtr.initialize(); + assert(Func->getDecl()->getReturnType()->isBooleanType()); + S.Stk.push<Boolean>(Overflow); + return true; +} + +/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut). +static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + unsigned BuiltinOp = Func->getBuiltinID(); + PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType()); + PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType()); + PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType()); + APSInt RHS = peekToAPSInt(S.Stk, RHST, + align(primSize(PT_Ptr)) + align(primSize(CarryT)) + + align(primSize(RHST))); + APSInt LHS = + peekToAPSInt(S.Stk, LHST, + align(primSize(PT_Ptr)) + align(primSize(RHST)) + + align(primSize(CarryT)) + align(primSize(LHST))); + APSInt CarryIn = peekToAPSInt( + S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT))); + APSInt CarryOut; + + APSInt Result; + // Copy the number of bits and sign. + Result = LHS; + CarryOut = LHS; + + bool FirstOverflowed = false; + bool SecondOverflowed = false; + switch (BuiltinOp) { + default: + llvm_unreachable("Invalid value for BuiltinOp"); + case Builtin::BI__builtin_addcb: + case Builtin::BI__builtin_addcs: + case Builtin::BI__builtin_addc: + case Builtin::BI__builtin_addcl: + case Builtin::BI__builtin_addcll: + Result = + LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed); + break; + case Builtin::BI__builtin_subcb: + case Builtin::BI__builtin_subcs: + case Builtin::BI__builtin_subc: + case Builtin::BI__builtin_subcl: + case Builtin::BI__builtin_subcll: + Result = + LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed); + break; + } + // It is possible for both overflows to happen but CGBuiltin uses an OR so + // this is consistent. 
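The generic __builtin_*_overflow path above computes in a widened type, shrinks to the result type, and flags overflow when widening the shrunk value back does not reproduce the wide result. The same idea with 64-bit intermediate math and a 32-bit result type (addOverflow32 is an invented helper; the narrowing cast is well-defined from C++20 on):

    #include <cstdint>
    #include <iostream>

    static bool addOverflow32(std::int64_t LHS, std::int64_t RHS,
                              std::int32_t &Result) {
      std::int64_t Wide = LHS + RHS;                    // compute in MaxBits
      Result = static_cast<std::int32_t>(Wide);         // shrink to result type
      return static_cast<std::int64_t>(Result) != Wide; // value changed?
    }

    int main() {
      std::int32_t R;
      std::cout << addOverflow32(0x7fffffff, 1, R) << ' ' << R << '\n';
      // 1 -2147483648
    }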
+ CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed); + + Pointer &CarryOutPtr = S.Stk.peek<Pointer>(); + QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType(); + PrimType CarryOutT = *S.getContext().classify(CarryOutType); + assignInteger(CarryOutPtr, CarryOutT, CarryOut); + CarryOutPtr.initialize(); + + assert(Call->getType() == Call->getArg(0)->getType()); + pushInteger(S, Result, Call->getType()); + return true; +} + +static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *Func, + const CallExpr *Call) { + unsigned CallSize = callArgSize(S, Call); + unsigned BuiltinOp = Func->getBuiltinID(); + PrimType ValT = *S.getContext().classify(Call->getArg(0)); + const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize); + + // When the argument is 0, the result of GCC builtins is undefined, whereas + // for Microsoft intrinsics, the result is the bit-width of the argument. + bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 && + BuiltinOp != Builtin::BI__lzcnt && + BuiltinOp != Builtin::BI__lzcnt64; + + if (Val == 0) { + if (Func->getBuiltinID() == Builtin::BI__builtin_clzg && + Call->getNumArgs() == 2) { + // We have a fallback parameter. + PrimType FallbackT = *S.getContext().classify(Call->getArg(1)); + const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT); + pushInteger(S, Fallback, Call->getType()); + return true; + } + + if (ZeroIsUndefined) + return false; + } + + pushInteger(S, Val.countl_zero(), Call->getType()); + return true; +} + +static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, const Function *Func, + const CallExpr *Call) { + unsigned CallSize = callArgSize(S, Call); + PrimType ValT = *S.getContext().classify(Call->getArg(0)); + const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize); + + if (Val == 0) { + if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg && + Call->getNumArgs() == 2) { + // We have a fallback parameter. 
+ PrimType FallbackT = *S.getContext().classify(Call->getArg(1)); + const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT); + pushInteger(S, Fallback, Call->getType()); + return true; + } + return false; + } + + pushInteger(S, Val.countr_zero(), Call->getType()); + return true; +} + +static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, const CallExpr *Call) { + PrimType ReturnT = *S.getContext().classify(Call->getType()); + PrimType ValT = *S.getContext().classify(Call->getArg(0)); + const APSInt &Val = peekToAPSInt(S.Stk, ValT); + assert(Val.getActiveBits() <= 64); + + INT_TYPE_SWITCH(ReturnT, + { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); }); + return true; +} + +/// bool __atomic_always_lock_free(size_t, void const volatile*) +/// bool __atomic_is_lock_free(size_t, void const volatile*) +/// bool __c11_atomic_is_lock_free(size_t) +static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + unsigned BuiltinOp = Func->getBuiltinID(); + + PrimType ValT = *S.getContext().classify(Call->getArg(0)); + unsigned SizeValOffset = 0; + if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free) + SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr)); + const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset); + + auto returnBool = [&S](bool Value) -> bool { + S.Stk.push<Boolean>(Value); + return true; + }; + + // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power + // of two less than or equal to the maximum inline atomic width, we know it + // is lock-free. If the size isn't a power of two, or greater than the + // maximum alignment where we promote atomics, we know it is not lock-free + // (at least not in the sense of atomic_is_lock_free). Otherwise, + // the answer can only be determined at runtime; for example, 16-byte + // atomics have lock-free implementations on some, but not all, + // x86-64 processors. + + // Check power-of-two. + CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue()); + if (Size.isPowerOfTwo()) { + // Check against inlining width. + unsigned InlineWidthBits = + S.getCtx().getTargetInfo().getMaxAtomicInlineWidth(); + if (Size <= S.getCtx().toCharUnitsFromBits(InlineWidthBits)) { + + // OK, we will inline appropriately-aligned operations of this size, + // and _Atomic(T) is appropriately-aligned. + if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free || + Size == CharUnits::One()) + return returnBool(true); + + // Same for null pointers. + assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free); + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (Ptr.isZero()) + return returnBool(true); + + QualType PointeeType = Call->getArg(1) + ->IgnoreImpCasts() + ->getType() + ->castAs<PointerType>() + ->getPointeeType(); + // OK, we will inline operations on this object. 
+ if (!PointeeType->isIncompleteType() && + S.getCtx().getTypeAlignInChars(PointeeType) >= Size) + return returnBool(true); + } + } + + if (BuiltinOp == Builtin::BI__atomic_always_lock_free) + return returnBool(false); + + return false; +} + +/// __builtin_complex(Float A, float B); +static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + const Floating &Arg2 = S.Stk.peek<Floating>(); + const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2); + Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 + + align(primSize(PT_Ptr))); + + Result.atIndex(0).deref<Floating>() = Arg1; + Result.atIndex(0).initialize(); + Result.atIndex(1).deref<Floating>() = Arg2; + Result.atIndex(1).initialize(); + Result.initialize(); + + return true; +} + +/// __builtin_is_aligned() +/// __builtin_align_up() +/// __builtin_align_down() +/// The first parameter is either an integer or a pointer. +/// The second parameter is the requested alignment as an integer. +static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + unsigned BuiltinOp = Func->getBuiltinID(); + unsigned CallSize = callArgSize(S, Call); + + PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1)); + const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT); + + if (Alignment < 0 || !Alignment.isPowerOf2()) { + S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment; + return false; + } + unsigned SrcWidth = S.getCtx().getIntWidth(Call->getArg(0)->getType()); + APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1)); + if (APSInt::compareValues(Alignment, MaxValue) > 0) { + S.FFDiag(Call, diag::note_constexpr_alignment_too_big) + << MaxValue << Call->getArg(0)->getType() << Alignment; + return false; + } + + // The first parameter is either an integer or a pointer (but not a function + // pointer). + PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0)); + + if (isIntegralType(FirstArgT)) { + const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize); + APSInt Align = Alignment.extOrTrunc(Src.getBitWidth()); + if (BuiltinOp == Builtin::BI__builtin_align_up) { + APSInt AlignedVal = + APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned()); + pushInteger(S, AlignedVal, Call->getType()); + } else if (BuiltinOp == Builtin::BI__builtin_align_down) { + APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned()); + pushInteger(S, AlignedVal, Call->getType()); + } else { + assert(*S.Ctx.classify(Call->getType()) == PT_Bool); + S.Stk.push<Boolean>((Src & (Align - 1)) == 0); + } + return true; + } + + assert(FirstArgT == PT_Ptr); + const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize); + + unsigned PtrOffset = Ptr.getByteOffset(); + PtrOffset = Ptr.getIndex(); + CharUnits BaseAlignment = + S.getCtx().getDeclAlign(Ptr.getDeclDesc()->asValueDecl()); + CharUnits PtrAlign = + BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset)); + + if (BuiltinOp == Builtin::BI__builtin_is_aligned) { + if (PtrAlign.getQuantity() >= Alignment) { + S.Stk.push<Boolean>(true); + return true; + } + // If the alignment is not known to be sufficient, some cases could still + // be aligned at run time. However, if the requested alignment is less or + // equal to the base alignment and the offset is not aligned, we know that + // the run-time value can never be aligned. 
+ if (BaseAlignment.getQuantity() >= Alignment && + PtrAlign.getQuantity() < Alignment) { + S.Stk.push<Boolean>(false); + return true; + } + + S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute) + << Alignment; + return false; + } + + assert(BuiltinOp == Builtin::BI__builtin_align_down || + BuiltinOp == Builtin::BI__builtin_align_up); + + // For align_up/align_down, we can return the same value if the alignment + // is known to be greater or equal to the requested value. + if (PtrAlign.getQuantity() >= Alignment) { + S.Stk.push<Pointer>(Ptr); + return true; + } + + // The alignment could be greater than the minimum at run-time, so we cannot + // infer much about the resulting pointer value. One case is possible: + // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we + // can infer the correct index if the requested alignment is smaller than + // the base alignment so we can perform the computation on the offset. + if (BaseAlignment.getQuantity() >= Alignment) { + assert(Alignment.getBitWidth() <= 64 && + "Cannot handle > 64-bit address-space"); + uint64_t Alignment64 = Alignment.getZExtValue(); + CharUnits NewOffset = + CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down + ? llvm::alignDown(PtrOffset, Alignment64) + : llvm::alignTo(PtrOffset, Alignment64)); + + S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity())); + return true; + } + + // Otherwise, we cannot constant-evaluate the result. + S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment; + return false; +} + +static bool interp__builtin_os_log_format_buffer_size(InterpState &S, + CodePtr OpPC, + const InterpFrame *Frame, + const Function *Func, + const CallExpr *Call) { + analyze_os_log::OSLogBufferLayout Layout; + analyze_os_log::computeOSLogBufferLayout(S.getCtx(), Call, Layout); + pushInteger(S, Layout.size().getQuantity(), Call->getType()); + return true; +} + +static bool interp__builtin_ptrauth_string_discriminator( + InterpState &S, CodePtr OpPC, const InterpFrame *Frame, + const Function *Func, const CallExpr *Call) { + const auto &Ptr = S.Stk.peek<Pointer>(); + assert(Ptr.getFieldDesc()->isPrimitiveArray()); + + StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1); + uint64_t Result = getPointerAuthStableSipHash(R); + pushInteger(S, Result, Call->getType()); + return true; +} + +bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F, + const CallExpr *Call) { + const InterpFrame *Frame = S.Current; + APValue Dummy; + + std::optional<PrimType> ReturnT = S.getContext().classify(Call); + + switch (F->getBuiltinID()) { + case Builtin::BI__builtin_is_constant_evaluated: + if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call)) + return false; + break; + case Builtin::BI__builtin_assume: + case Builtin::BI__assume: + break; + case Builtin::BI__builtin_strcmp: + if (!interp__builtin_strcmp(S, OpPC, Frame, Call)) + return false; + break; + case Builtin::BI__builtin_strlen: + if (!interp__builtin_strlen(S, OpPC, Frame, Call)) + return false; + break; + case Builtin::BI__builtin_nan: + case Builtin::BI__builtin_nanf: + case Builtin::BI__builtin_nanl: + case Builtin::BI__builtin_nanf16: + case Builtin::BI__builtin_nanf128: + if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false)) + return false; + break; + case Builtin::BI__builtin_nans: + case Builtin::BI__builtin_nansf: + case Builtin::BI__builtin_nansl: + case Builtin::BI__builtin_nansf16: + case Builtin::BI__builtin_nansf128: + if 
(!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true)) + return false; + break; + + case Builtin::BI__builtin_huge_val: + case Builtin::BI__builtin_huge_valf: + case Builtin::BI__builtin_huge_vall: + case Builtin::BI__builtin_huge_valf16: + case Builtin::BI__builtin_huge_valf128: + case Builtin::BI__builtin_inf: + case Builtin::BI__builtin_inff: + case Builtin::BI__builtin_infl: + case Builtin::BI__builtin_inff16: + case Builtin::BI__builtin_inff128: + if (!interp__builtin_inf(S, OpPC, Frame, F)) + return false; + break; + case Builtin::BI__builtin_copysign: + case Builtin::BI__builtin_copysignf: + case Builtin::BI__builtin_copysignl: + case Builtin::BI__builtin_copysignf128: + if (!interp__builtin_copysign(S, OpPC, Frame, F)) + return false; + break; + + case Builtin::BI__builtin_fmin: + case Builtin::BI__builtin_fminf: + case Builtin::BI__builtin_fminl: + case Builtin::BI__builtin_fminf16: + case Builtin::BI__builtin_fminf128: + if (!interp__builtin_fmin(S, OpPC, Frame, F)) + return false; + break; + + case Builtin::BI__builtin_fmax: + case Builtin::BI__builtin_fmaxf: + case Builtin::BI__builtin_fmaxl: + case Builtin::BI__builtin_fmaxf16: + case Builtin::BI__builtin_fmaxf128: + if (!interp__builtin_fmax(S, OpPC, Frame, F)) + return false; + break; + + case Builtin::BI__builtin_isnan: + if (!interp__builtin_isnan(S, OpPC, Frame, F, Call)) + return false; + break; + case Builtin::BI__builtin_issignaling: + if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_isinf: + if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call)) + return false; + break; + + case Builtin::BI__builtin_isinf_sign: + if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call)) + return false; + break; + + case Builtin::BI__builtin_isfinite: + if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call)) + return false; + break; + case Builtin::BI__builtin_isnormal: + if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call)) + return false; + break; + case Builtin::BI__builtin_issubnormal: + if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call)) + return false; + break; + case Builtin::BI__builtin_iszero: + if (!interp__builtin_iszero(S, OpPC, Frame, F, Call)) + return false; + break; + case Builtin::BI__builtin_isfpclass: + if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call)) + return false; + break; + case Builtin::BI__builtin_fpclassify: + if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_fabs: + case Builtin::BI__builtin_fabsf: + case Builtin::BI__builtin_fabsl: + case Builtin::BI__builtin_fabsf128: + if (!interp__builtin_fabs(S, OpPC, Frame, F)) + return false; + break; + + case Builtin::BI__builtin_popcount: + case Builtin::BI__builtin_popcountl: + case Builtin::BI__builtin_popcountll: + case Builtin::BI__builtin_popcountg: + case Builtin::BI__popcnt16: // Microsoft variants of popcount + case Builtin::BI__popcnt: + case Builtin::BI__popcnt64: + if (!interp__builtin_popcount(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_parity: + case Builtin::BI__builtin_parityl: + case Builtin::BI__builtin_parityll: + if (!interp__builtin_parity(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_clrsb: + case Builtin::BI__builtin_clrsbl: + case Builtin::BI__builtin_clrsbll: + if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_bitreverse8: + case 
Builtin::BI__builtin_bitreverse16: + case Builtin::BI__builtin_bitreverse32: + case Builtin::BI__builtin_bitreverse64: + if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_classify_type: + if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_expect: + case Builtin::BI__builtin_expect_with_probability: + if (!interp__builtin_expect(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_rotateleft8: + case Builtin::BI__builtin_rotateleft16: + case Builtin::BI__builtin_rotateleft32: + case Builtin::BI__builtin_rotateleft64: + case Builtin::BI_rotl8: // Microsoft variants of rotate left + case Builtin::BI_rotl16: + case Builtin::BI_rotl: + case Builtin::BI_lrotl: + case Builtin::BI_rotl64: + if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false)) + return false; + break; + + case Builtin::BI__builtin_rotateright8: + case Builtin::BI__builtin_rotateright16: + case Builtin::BI__builtin_rotateright32: + case Builtin::BI__builtin_rotateright64: + case Builtin::BI_rotr8: // Microsoft variants of rotate right + case Builtin::BI_rotr16: + case Builtin::BI_rotr: + case Builtin::BI_lrotr: + case Builtin::BI_rotr64: + if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true)) + return false; + break; + + case Builtin::BI__builtin_ffs: + case Builtin::BI__builtin_ffsl: + case Builtin::BI__builtin_ffsll: + if (!interp__builtin_ffs(S, OpPC, Frame, F, Call)) + return false; + break; + case Builtin::BIaddressof: + case Builtin::BI__addressof: + case Builtin::BI__builtin_addressof: + if (!interp__builtin_addressof(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BIas_const: + case Builtin::BIforward: + case Builtin::BIforward_like: + case Builtin::BImove: + case Builtin::BImove_if_noexcept: + if (!interp__builtin_move(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_eh_return_data_regno: + if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_launder: + if (!noopPointer(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_add_overflow: + case Builtin::BI__builtin_sub_overflow: + case Builtin::BI__builtin_mul_overflow: + case Builtin::BI__builtin_sadd_overflow: + case Builtin::BI__builtin_uadd_overflow: + case Builtin::BI__builtin_uaddl_overflow: + case Builtin::BI__builtin_uaddll_overflow: + case Builtin::BI__builtin_usub_overflow: + case Builtin::BI__builtin_usubl_overflow: + case Builtin::BI__builtin_usubll_overflow: + case Builtin::BI__builtin_umul_overflow: + case Builtin::BI__builtin_umull_overflow: + case Builtin::BI__builtin_umulll_overflow: + case Builtin::BI__builtin_saddl_overflow: + case Builtin::BI__builtin_saddll_overflow: + case Builtin::BI__builtin_ssub_overflow: + case Builtin::BI__builtin_ssubl_overflow: + case Builtin::BI__builtin_ssubll_overflow: + case Builtin::BI__builtin_smul_overflow: + case Builtin::BI__builtin_smull_overflow: + case Builtin::BI__builtin_smulll_overflow: + if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_addcb: + case Builtin::BI__builtin_addcs: + case Builtin::BI__builtin_addc: + case Builtin::BI__builtin_addcl: + case Builtin::BI__builtin_addcll: + case Builtin::BI__builtin_subcb: + case Builtin::BI__builtin_subcs: + case Builtin::BI__builtin_subc: + case Builtin::BI__builtin_subcl: + 
case Builtin::BI__builtin_subcll: + if (!interp__builtin_carryop(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_clz: + case Builtin::BI__builtin_clzl: + case Builtin::BI__builtin_clzll: + case Builtin::BI__builtin_clzs: + case Builtin::BI__builtin_clzg: + case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes + case Builtin::BI__lzcnt: + case Builtin::BI__lzcnt64: + if (!interp__builtin_clz(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_ctz: + case Builtin::BI__builtin_ctzl: + case Builtin::BI__builtin_ctzll: + case Builtin::BI__builtin_ctzs: + case Builtin::BI__builtin_ctzg: + if (!interp__builtin_ctz(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_bswap16: + case Builtin::BI__builtin_bswap32: + case Builtin::BI__builtin_bswap64: + if (!interp__builtin_bswap(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__atomic_always_lock_free: + case Builtin::BI__atomic_is_lock_free: + case Builtin::BI__c11_atomic_is_lock_free: + if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_complex: + if (!interp__builtin_complex(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_is_aligned: + case Builtin::BI__builtin_align_up: + case Builtin::BI__builtin_align_down: + if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_os_log_format_buffer_size: + if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call)) + return false; + break; + + case Builtin::BI__builtin_ptrauth_string_discriminator: + if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call)) + return false; + break; + + default: + S.FFDiag(S.Current->getLocation(OpPC), + diag::note_invalid_subexpr_in_const_expr) + << S.Current->getRange(OpPC); + + return false; + } + + return retPrimValue(S, OpPC, Dummy, ReturnT); +} + +bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, + llvm::ArrayRef<int64_t> ArrayIndices, + int64_t &IntResult) { + CharUnits Result; + unsigned N = E->getNumComponents(); + assert(N > 0); + + unsigned ArrayIndex = 0; + QualType CurrentType = E->getTypeSourceInfo()->getType(); + for (unsigned I = 0; I != N; ++I) { + const OffsetOfNode &Node = E->getComponent(I); + switch (Node.getKind()) { + case OffsetOfNode::Field: { + const FieldDecl *MemberDecl = Node.getField(); + const RecordType *RT = CurrentType->getAs<RecordType>(); + if (!RT) + return false; + const RecordDecl *RD = RT->getDecl(); + if (RD->isInvalidDecl()) + return false; + const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD); + unsigned FieldIndex = MemberDecl->getFieldIndex(); + assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type"); + Result += S.getCtx().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex)); + CurrentType = MemberDecl->getType().getNonReferenceType(); + break; + } + case OffsetOfNode::Array: { + // When generating bytecode, we put all the index expressions as Sint64 on + // the stack. 
+ int64_t Index = ArrayIndices[ArrayIndex]; + const ArrayType *AT = S.getCtx().getAsArrayType(CurrentType); + if (!AT) + return false; + CurrentType = AT->getElementType(); + CharUnits ElementSize = S.getCtx().getTypeSizeInChars(CurrentType); + Result += Index * ElementSize; + ++ArrayIndex; + break; + } + case OffsetOfNode::Base: { + const CXXBaseSpecifier *BaseSpec = Node.getBase(); + if (BaseSpec->isVirtual()) + return false; + + // Find the layout of the class whose base we are looking into. + const RecordType *RT = CurrentType->getAs<RecordType>(); + if (!RT) + return false; + const RecordDecl *RD = RT->getDecl(); + if (RD->isInvalidDecl()) + return false; + const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD); + + // Find the base class itself. + CurrentType = BaseSpec->getType(); + const RecordType *BaseRT = CurrentType->getAs<RecordType>(); + if (!BaseRT) + return false; + + // Add the offset to the base. + Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl())); + break; + } + case OffsetOfNode::Identifier: + llvm_unreachable("Dependent OffsetOfExpr?"); + } + } + + IntResult = Result.getQuantity(); + + return true; +} + +bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, + const Pointer &Ptr, const APSInt &IntValue) { + + const Record *R = Ptr.getRecord(); + assert(R); + assert(R->getNumFields() == 1); + + unsigned FieldOffset = R->getField(0u)->Offset; + const Pointer &FieldPtr = Ptr.atField(FieldOffset); + PrimType FieldT = *S.getContext().classify(FieldPtr.getType()); + + INT_TYPE_SWITCH(FieldT, + FieldPtr.deref<T>() = T::from(IntValue.getSExtValue())); + FieldPtr.initialize(); + return true; +} + +bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) { + assert(Src.isLive() && Dest.isLive()); + + [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc(); + const Descriptor *DestDesc = Dest.getFieldDesc(); + + assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive()); + + if (DestDesc->isPrimitiveArray()) { + assert(SrcDesc->isPrimitiveArray()); + assert(SrcDesc->getNumElems() == DestDesc->getNumElems()); + PrimType ET = DestDesc->getPrimType(); + for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) { + Pointer DestElem = Dest.atIndex(I); + TYPE_SWITCH(ET, { + DestElem.deref<T>() = Src.atIndex(I).deref<T>(); + DestElem.initialize(); + }); + } + return true; + } + + if (DestDesc->isRecord()) { + assert(SrcDesc->isRecord()); + assert(SrcDesc->ElemRecord == DestDesc->ElemRecord); + const Record *R = DestDesc->ElemRecord; + for (const Record::Field &F : R->fields()) { + Pointer DestField = Dest.atField(F.Offset); + if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) { + TYPE_SWITCH(*FT, { + DestField.deref<T>() = Src.atField(F.Offset).deref<T>(); + DestField.initialize(); + }); + } else { + return Invalid(S, OpPC); + } + } + return true; + } + + // FIXME: Composite types. 
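(Editorial aside, not part of the patch.) InterpretOffsetOf() above folds a __builtin_offsetof expression component by component: field components are resolved through the ASTRecordLayout, array components consume the Sint64 indices the compiler pushed, and non-virtual base components add the base-class offset. A small sketch of the kind of expression it evaluates, with hypothetical struct names:

    struct Inner { int A; int B[4]; };
    struct Outer { char C; Inner In; };

    // Two field components (In, B) plus one array component ([2]); the array
    // component contributes Index * sizeof(element) to the final offset.
    static_assert(__builtin_offsetof(Outer, In.B[2]) ==
                      __builtin_offsetof(Outer, In.B[0]) + 2 * sizeof(int),
                  "array component adds Index * sizeof(element)");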
+
+  return Invalid(S, OpPC);
+}
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
index 9d01bf0333fe..1c37450ae1c6 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
@@ -7,44 +7,85 @@
 //===----------------------------------------------------------------------===//
 
 #include "InterpFrame.h"
+#include "Boolean.h"
+#include "Floating.h"
 #include "Function.h"
-#include "Interp.h"
 #include "InterpStack.h"
+#include "InterpState.h"
+#include "MemberPointer.h"
+#include "Pointer.h"
 #include "PrimType.h"
 #include "Program.h"
+#include "clang/AST/ASTContext.h"
 #include "clang/AST/DeclCXX.h"
 
 using namespace clang;
 using namespace clang::interp;
 
-InterpFrame::InterpFrame(InterpState &S, Function *Func, InterpFrame *Caller,
-                         CodePtr RetPC, Pointer &&This)
-    : Caller(Caller), S(S), Func(Func), This(std::move(This)), RetPC(RetPC),
-      ArgSize(Func ? Func->getArgSize() : 0),
-      Args(static_cast<char *>(S.Stk.top())), FrameOffset(S.Stk.size()) {
-  if (Func) {
-    if (unsigned FrameSize = Func->getFrameSize()) {
-      Locals = std::make_unique<char[]>(FrameSize);
-      for (auto &Scope : Func->scopes()) {
-        for (auto &Local : Scope.locals()) {
-          Block *B = new (localBlock(Local.Offset)) Block(Local.Desc);
-          B->invokeCtor();
-        }
-      }
+InterpFrame::InterpFrame(InterpState &S, const Function *Func,
+                         InterpFrame *Caller, CodePtr RetPC, unsigned ArgSize)
+    : Caller(Caller), S(S), Depth(Caller ? Caller->Depth + 1 : 0), Func(Func),
+      RetPC(RetPC), ArgSize(ArgSize), Args(static_cast<char *>(S.Stk.top())),
+      FrameOffset(S.Stk.size()) {
+  if (!Func)
+    return;
+
+  unsigned FrameSize = Func->getFrameSize();
+  if (FrameSize == 0)
+    return;
+
+  Locals = std::make_unique<char[]>(FrameSize);
+  for (auto &Scope : Func->scopes()) {
+    for (auto &Local : Scope.locals()) {
+      Block *B =
+          new (localBlock(Local.Offset)) Block(S.Ctx.getEvalID(), Local.Desc);
+      B->invokeCtor();
+      new (localInlineDesc(Local.Offset)) InlineDescriptor(Local.Desc);
     }
   }
 }
 
+InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC,
+                         unsigned VarArgSize)
+    : InterpFrame(S, Func, S.Current, RetPC, Func->getArgSize() + VarArgSize) {
+  // As per our calling convention, the this pointer is
+  // part of the ArgSize.
+  // If the function has RVO, the RVO pointer is first.
+  // If the function has a This pointer, that one is next.
+  // Then follow the actual arguments (but those are handled
+  // in getParamPointer()).
+  if (Func->hasRVO())
+    RVOPtr = stackRef<Pointer>(0);
+
+  if (Func->hasThisPointer()) {
+    if (Func->hasRVO())
+      This = stackRef<Pointer>(sizeof(Pointer));
+    else
+      This = stackRef<Pointer>(0);
+  }
+}
+
 InterpFrame::~InterpFrame() {
-  if (Func && Func->isConstructor() && This.isBaseClass())
-    This.initialize();
   for (auto &Param : Params)
     S.deallocate(reinterpret_cast<Block *>(Param.second.get()));
+
+  // When destroying the InterpFrame, call the Dtor for all blocks
+  // that haven't been destroyed via a destroy() op yet.
+  // This happens when the execution is interrupted midway through.
+ if (Func) { + for (auto &Scope : Func->scopes()) { + for (auto &Local : Scope.locals()) { + Block *B = localBlock(Local.Offset); + if (B->isInitialized()) + B->invokeDtor(); + } + } + } } void InterpFrame::destroy(unsigned Idx) { for (auto &Local : Func->getScope(Idx).locals()) { - S.deallocate(reinterpret_cast<Block *>(localBlock(Local.Offset))); + S.deallocate(localBlock(Local.Offset)); } } @@ -66,20 +107,19 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx, return; } - auto printDesc = [&OS, &Ctx](Descriptor *Desc) { - if (auto *D = Desc->asDecl()) { + auto printDesc = [&OS, &Ctx](const Descriptor *Desc) { + if (const auto *D = Desc->asDecl()) { // Subfields or named values. - if (auto *VD = dyn_cast<ValueDecl>(D)) { + if (const auto *VD = dyn_cast<ValueDecl>(D)) { OS << *VD; return; } // Base classes. - if (isa<RecordDecl>(D)) { + if (isa<RecordDecl>(D)) return; - } } // Temporary expression. - if (auto *E = Desc->asExpr()) { + if (const auto *E = Desc->asExpr()) { E->printPretty(OS, nullptr, Ctx.getPrintingPolicy()); return; } @@ -94,39 +134,54 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx, F = F.isArrayElement() ? F.getArray().expand() : F.getBase(); } + // Drop the first pointer since we print it unconditionally anyway. + if (!Levels.empty()) + Levels.erase(Levels.begin()); + printDesc(P.getDeclDesc()); - for (auto It = Levels.rbegin(); It != Levels.rend(); ++It) { - if (It->inArray()) { - OS << "[" << It->expand().getIndex() << "]"; + for (const auto &It : Levels) { + if (It.inArray()) { + OS << "[" << It.expand().getIndex() << "]"; continue; } - if (auto Index = It->getIndex()) { + if (auto Index = It.getIndex()) { OS << " + " << Index; continue; } OS << "."; - printDesc(It->getFieldDesc()); + printDesc(It.getFieldDesc()); } } -void InterpFrame::describe(llvm::raw_ostream &OS) { +void InterpFrame::describe(llvm::raw_ostream &OS) const { + // We create frames for builtin functions as well, but we can't reliably + // diagnose them. The 'in call to' diagnostics for them add no value to the + // user _and_ it doesn't generally work since the argument types don't always + // match the function prototype. Just ignore them. + // Similarly, for lambda static invokers, we would just print __invoke(). + if (const auto *F = getFunction(); + F && (F->isBuiltin() || F->isLambdaStaticInvoker())) + return; + const FunctionDecl *F = getCallee(); - auto *M = dyn_cast<CXXMethodDecl>(F); - if (M && M->isInstance() && !isa<CXXConstructorDecl>(F)) { + if (const auto *M = dyn_cast<CXXMethodDecl>(F); + M && M->isInstance() && !isa<CXXConstructorDecl>(F)) { print(OS, This, S.getCtx(), S.getCtx().getRecordType(M->getParent())); OS << "->"; } - OS << *F << "("; - unsigned Off = Func->hasRVO() ? primSize(PT_Ptr) : 0; + + F->getNameForDiagnostic(OS, S.getCtx().getPrintingPolicy(), + /*Qualified=*/false); + OS << '('; + unsigned Off = 0; + + Off += Func->hasRVO() ? primSize(PT_Ptr) : 0; + Off += Func->hasThisPointer() ? 
primSize(PT_Ptr) : 0; + for (unsigned I = 0, N = F->getNumParams(); I < N; ++I) { QualType Ty = F->getParamDecl(I)->getType(); - PrimType PrimTy; - if (llvm::Optional<PrimType> T = S.Ctx.classify(Ty)) { - PrimTy = *T; - } else { - PrimTy = PT_Ptr; - } + PrimType PrimTy = S.Ctx.classify(Ty).value_or(PT_Ptr); TYPE_SWITCH(PrimTy, print(OS, stackRef<T>(Off), S.getCtx(), Ty)); Off += align(primSize(PrimTy)); @@ -142,34 +197,37 @@ Frame *InterpFrame::getCaller() const { return S.getSplitFrame(); } -SourceLocation InterpFrame::getCallLocation() const { - if (!Caller->Func) - return S.getLocation(nullptr, {}); - return S.getLocation(Caller->Func, RetPC - sizeof(uintptr_t)); +SourceRange InterpFrame::getCallRange() const { + if (!Caller->Func) { + if (SourceRange NullRange = S.getRange(nullptr, {}); NullRange.isValid()) + return NullRange; + return S.EvalLocation; + } + return S.getRange(Caller->Func, RetPC - sizeof(uintptr_t)); } const FunctionDecl *InterpFrame::getCallee() const { + if (!Func) + return nullptr; return Func->getDecl(); } -Pointer InterpFrame::getLocalPointer(unsigned Offset) { +Pointer InterpFrame::getLocalPointer(unsigned Offset) const { assert(Offset < Func->getFrameSize() && "Invalid local offset."); - return Pointer( - reinterpret_cast<Block *>(Locals.get() + Offset - sizeof(Block))); + return Pointer(localBlock(Offset)); } Pointer InterpFrame::getParamPointer(unsigned Off) { // Return the block if it was created previously. - auto Pt = Params.find(Off); - if (Pt != Params.end()) { + if (auto Pt = Params.find(Off); Pt != Params.end()) return Pointer(reinterpret_cast<Block *>(Pt->second.get())); - } // Allocate memory to store the parameter and the block metadata. const auto &Desc = Func->getParamDescriptor(Off); size_t BlockSize = sizeof(Block) + Desc.second->getAllocSize(); auto Memory = std::make_unique<char[]>(BlockSize); - auto *B = new (Memory.get()) Block(Desc.second); + auto *B = new (Memory.get()) Block(S.Ctx.getEvalID(), Desc.second); + B->invokeCtor(); // Copy the initial value. TYPE_SWITCH(Desc.first, new (B->data()) T(stackRef<T>(Off))); @@ -180,14 +238,31 @@ Pointer InterpFrame::getParamPointer(unsigned Off) { } SourceInfo InterpFrame::getSource(CodePtr PC) const { + // Implicitly created functions don't have any code we could point at, + // so return the call site. 
+ if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller) + return Caller->getSource(RetPC); + return S.getSource(Func, PC); } const Expr *InterpFrame::getExpr(CodePtr PC) const { + if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller) + return Caller->getExpr(RetPC); + return S.getExpr(Func, PC); } SourceLocation InterpFrame::getLocation(CodePtr PC) const { + if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller) + return Caller->getLocation(RetPC); + return S.getLocation(Func, PC); } +SourceRange InterpFrame::getRange(CodePtr PC) const { + if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller) + return Caller->getRange(RetPC); + + return S.getRange(Func, PC); +} diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h index 304e2ad66537..4a312a71bcf1 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h @@ -14,16 +14,13 @@ #define LLVM_CLANG_AST_INTERP_INTERPFRAME_H #include "Frame.h" -#include "Pointer.h" #include "Program.h" -#include "State.h" -#include <cstdint> -#include <vector> namespace clang { namespace interp { class Function; class InterpState; +class Pointer; /// Frame storing local variables. class InterpFrame final : public Frame { @@ -32,8 +29,15 @@ public: InterpFrame *Caller; /// Creates a new frame for a method call. - InterpFrame(InterpState &S, Function *Func, InterpFrame *Caller, - CodePtr RetPC, Pointer &&This); + InterpFrame(InterpState &S, const Function *Func, InterpFrame *Caller, + CodePtr RetPC, unsigned ArgSize); + + /// Creates a new frame with the values that make sense. + /// I.e., the caller is the current frame of S, + /// the This() pointer is the current Pointer on the top of S's stack, + /// and the RVO pointer is before that. + InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC, + unsigned VarArgSize = 0); /// Destroys the frame, killing all live pointers to stack slots. ~InterpFrame(); @@ -45,44 +49,43 @@ public: void popArgs(); /// Describes the frame with arguments for diagnostic purposes. - void describe(llvm::raw_ostream &OS) override; + void describe(llvm::raw_ostream &OS) const override; /// Returns the parent frame object. Frame *getCaller() const override; /// Returns the location of the call to the frame. - SourceLocation getCallLocation() const override; + SourceRange getCallRange() const override; /// Returns the caller. const FunctionDecl *getCallee() const override; /// Returns the current function. - Function *getFunction() const { return Func; } + const Function *getFunction() const { return Func; } /// Returns the offset on the stack at which the frame starts. size_t getFrameOffset() const { return FrameOffset; } /// Returns the value of a local variable. - template <typename T> const T &getLocal(unsigned Offset) { + template <typename T> const T &getLocal(unsigned Offset) const { return localRef<T>(Offset); } /// Mutates a local variable. template <typename T> void setLocal(unsigned Offset, const T &Value) { localRef<T>(Offset) = Value; + localInlineDesc(Offset)->IsInitialized = true; } /// Returns a pointer to a local variables. - Pointer getLocalPointer(unsigned Offset); + Pointer getLocalPointer(unsigned Offset) const; /// Returns the value of an argument. 
- template <typename T> const T &getParam(unsigned Offset) { + template <typename T> const T &getParam(unsigned Offset) const { auto Pt = Params.find(Offset); - if (Pt == Params.end()) { + if (Pt == Params.end()) return stackRef<T>(Offset); - } else { - return Pointer(reinterpret_cast<Block *>(Pt->second.get())).deref<T>(); - } + return Pointer(reinterpret_cast<Block *>(Pt->second.get())).deref<T>(); } /// Mutates a local copy of a parameter. @@ -96,6 +99,9 @@ public: /// Returns the 'this' pointer. const Pointer &getThis() const { return This; } + /// Returns the RVO pointer, if the Function has one. + const Pointer &getRVOPtr() const { return RVOPtr; } + /// Checks if the frame is a root frame - return should quit the interpreter. bool isRoot() const { return !Func; } @@ -109,30 +115,46 @@ public: virtual SourceInfo getSource(CodePtr PC) const; const Expr *getExpr(CodePtr PC) const; SourceLocation getLocation(CodePtr PC) const; + SourceRange getRange(CodePtr PC) const; + + unsigned getDepth() const { return Depth; } + + void dump() const { dump(llvm::errs(), 0); } + void dump(llvm::raw_ostream &OS, unsigned Indent = 0) const; private: /// Returns an original argument from the stack. - template <typename T> const T &stackRef(unsigned Offset) { + template <typename T> const T &stackRef(unsigned Offset) const { + assert(Args); return *reinterpret_cast<const T *>(Args - ArgSize + Offset); } /// Returns an offset to a local. - template <typename T> T &localRef(unsigned Offset) { - return *reinterpret_cast<T *>(Locals.get() + Offset); + template <typename T> T &localRef(unsigned Offset) const { + return getLocalPointer(Offset).deref<T>(); } /// Returns a pointer to a local's block. - void *localBlock(unsigned Offset) { - return Locals.get() + Offset - sizeof(Block); + Block *localBlock(unsigned Offset) const { + return reinterpret_cast<Block *>(Locals.get() + Offset - sizeof(Block)); + } + + /// Returns the inline descriptor of the local. + InlineDescriptor *localInlineDesc(unsigned Offset) const { + return reinterpret_cast<InlineDescriptor *>(Locals.get() + Offset); } private: /// Reference to the interpreter state. InterpState &S; + /// Depth of this frame. + unsigned Depth; /// Reference to the function being executed. - Function *Func; + const Function *Func; /// Current object pointer for methods. Pointer This; + /// Pointer the non-primitive return value gets constructed in. + Pointer RVOPtr; /// Return address. CodePtr RetPC; /// The size of all the arguments. diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.cpp new file mode 100644 index 000000000000..6af03691f1b2 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.cpp @@ -0,0 +1,42 @@ +//===--- InterpShared.cpp ---------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "InterpShared.h" +#include "clang/AST/Attr.h" +#include "llvm/ADT/BitVector.h" + +namespace clang { +namespace interp { + +llvm::BitVector collectNonNullArgs(const FunctionDecl *F, + const llvm::ArrayRef<const Expr *> &Args) { + llvm::BitVector NonNullArgs; + if (!F) + return NonNullArgs; + + assert(F); + NonNullArgs.resize(Args.size()); + + for (const auto *Attr : F->specific_attrs<NonNullAttr>()) { + if (!Attr->args_size()) { + NonNullArgs.set(); + break; + } else + for (auto Idx : Attr->args()) { + unsigned ASTIdx = Idx.getASTIndex(); + if (ASTIdx >= Args.size()) + continue; + NonNullArgs[ASTIdx] = true; + } + } + + return NonNullArgs; +} + +} // namespace interp +} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.h new file mode 100644 index 000000000000..8c5e0bee22c9 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpShared.h @@ -0,0 +1,26 @@ +//===--- InterpShared.h -----------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_AST_INTERP_SHARED_H +#define LLVM_CLANG_LIB_AST_INTERP_SHARED_H + +#include "llvm/ADT/BitVector.h" + +namespace clang { +class FunctionDecl; +class Expr; + +namespace interp { + +llvm::BitVector collectNonNullArgs(const FunctionDecl *F, + const llvm::ArrayRef<const Expr *> &Args); + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp index 5c803f3d9424..c7024740d322 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp @@ -6,9 +6,14 @@ // //===----------------------------------------------------------------------===// +#include "InterpStack.h" +#include "Boolean.h" +#include "Floating.h" +#include "Integral.h" +#include "MemberPointer.h" +#include "Pointer.h" #include <cassert> #include <cstdlib> -#include "InterpStack.h" using namespace clang; using namespace clang::interp; @@ -19,11 +24,14 @@ InterpStack::~InterpStack() { void InterpStack::clear() { if (Chunk && Chunk->Next) - free(Chunk->Next); + std::free(Chunk->Next); if (Chunk) - free(Chunk); + std::free(Chunk); Chunk = nullptr; StackSize = 0; +#ifndef NDEBUG + ItemTypes.clear(); +#endif } void *InterpStack::grow(size_t Size) { @@ -33,7 +41,7 @@ void *InterpStack::grow(size_t Size) { if (Chunk && Chunk->Next) { Chunk = Chunk->Next; } else { - StackChunk *Next = new (malloc(ChunkSize)) StackChunk(Chunk); + StackChunk *Next = new (std::malloc(ChunkSize)) StackChunk(Chunk); if (Chunk) Chunk->Next = Next; Chunk = Next; @@ -46,7 +54,7 @@ void *InterpStack::grow(size_t Size) { return Object; } -void *InterpStack::peek(size_t Size) { +void *InterpStack::peekData(size_t Size) const { assert(Chunk && "Stack is empty!"); StackChunk *Ptr = Chunk; @@ -65,7 +73,7 @@ void InterpStack::shrink(size_t Size) { while (Size > Chunk->size()) { Size -= Chunk->size(); if (Chunk->Next) { - free(Chunk->Next); + std::free(Chunk->Next); 
Chunk->Next = nullptr; } Chunk->End = Chunk->start(); @@ -76,3 +84,29 @@ void InterpStack::shrink(size_t Size) { Chunk->End -= Size; StackSize -= Size; } + +void InterpStack::dump() const { +#ifndef NDEBUG + llvm::errs() << "Items: " << ItemTypes.size() << ". Size: " << size() << '\n'; + if (ItemTypes.empty()) + return; + + size_t Index = 0; + size_t Offset = 0; + + // The type of the item on the top of the stack is inserted to the back + // of the vector, so the iteration has to happen backwards. + for (auto TyIt = ItemTypes.rbegin(); TyIt != ItemTypes.rend(); ++TyIt) { + Offset += align(primSize(*TyIt)); + + llvm::errs() << Index << '/' << Offset << ": "; + TYPE_SWITCH(*TyIt, { + const T &V = peek<T>(Offset); + llvm::errs() << V; + }); + llvm::errs() << '\n'; + + ++Index; + } +#endif +} diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h index 127adb6b8eba..4966e2870de6 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h @@ -13,7 +13,12 @@ #ifndef LLVM_CLANG_AST_INTERP_INTERPSTACK_H #define LLVM_CLANG_AST_INTERP_INTERPSTACK_H +#include "FunctionPointer.h" +#include "IntegralAP.h" +#include "MemberPointer.h" +#include "PrimType.h" #include <memory> +#include <vector> namespace clang { namespace interp { @@ -29,31 +34,52 @@ public: /// Constructs a value in place on the top of the stack. template <typename T, typename... Tys> void push(Tys &&... Args) { new (grow(aligned_size<T>())) T(std::forward<Tys>(Args)...); +#ifndef NDEBUG + ItemTypes.push_back(toPrimType<T>()); +#endif } /// Returns the value from the top of the stack and removes it. template <typename T> T pop() { - auto *Ptr = &peek<T>(); - auto Value = std::move(*Ptr); - Ptr->~T(); +#ifndef NDEBUG + assert(!ItemTypes.empty()); + assert(ItemTypes.back() == toPrimType<T>()); + ItemTypes.pop_back(); +#endif + T *Ptr = &peekInternal<T>(); + T Value = std::move(*Ptr); shrink(aligned_size<T>()); return Value; } /// Discards the top value from the stack. template <typename T> void discard() { - auto *Ptr = &peek<T>(); +#ifndef NDEBUG + assert(!ItemTypes.empty()); + assert(ItemTypes.back() == toPrimType<T>()); + ItemTypes.pop_back(); +#endif + T *Ptr = &peekInternal<T>(); Ptr->~T(); shrink(aligned_size<T>()); } /// Returns a reference to the value on the top of the stack. - template <typename T> T &peek() { - return *reinterpret_cast<T *>(peek(aligned_size<T>())); + template <typename T> T &peek() const { +#ifndef NDEBUG + assert(!ItemTypes.empty()); + assert(ItemTypes.back() == toPrimType<T>()); +#endif + return peekInternal<T>(); + } + + template <typename T> T &peek(size_t Offset) const { + assert(aligned(Offset)); + return *reinterpret_cast<T *>(peekData(Offset)); } /// Returns a pointer to the top object. - void *top() { return Chunk ? peek(0) : nullptr; } + void *top() const { return Chunk ? peekData(0) : nullptr; } /// Returns the size of the stack in bytes. size_t size() const { return StackSize; } @@ -61,6 +87,12 @@ public: /// Clears the stack without calling any destructors. void clear(); + /// Returns whether the stack is empty. + bool empty() const { return StackSize == 0; } + + /// dump the stack contents to stderr. + void dump() const; + private: /// All stack slots are aligned to the native pointer alignment for storage. /// The size of an object is rounded up to a pointer alignment multiple. 
@@ -69,10 +101,15 @@ private: return ((sizeof(T) + PtrAlign - 1) / PtrAlign) * PtrAlign; } - /// Grows the stack to accomodate a value and returns a pointer to it. + /// Like the public peek(), but without the debug type checks. + template <typename T> T &peekInternal() const { + return *reinterpret_cast<T *>(peekData(aligned_size<T>())); + } + + /// Grows the stack to accommodate a value and returns a pointer to it. void *grow(size_t Size); /// Returns a pointer from the top of the stack. - void *peek(size_t Size); + void *peekData(size_t Size) const; /// Shrinks the stack. void shrink(size_t Size); @@ -94,10 +131,13 @@ private: : Next(nullptr), Prev(Prev), End(reinterpret_cast<char *>(this + 1)) {} /// Returns the size of the chunk, minus the header. - size_t size() { return End - start(); } + size_t size() const { return End - start(); } /// Returns a pointer to the start of the data region. char *start() { return reinterpret_cast<char *>(this + 1); } + const char *start() const { + return reinterpret_cast<const char *>(this + 1); + } }; static_assert(sizeof(StackChunk) < ChunkSize, "Invalid chunk size"); @@ -105,6 +145,55 @@ private: StackChunk *Chunk = nullptr; /// Total size of the stack. size_t StackSize = 0; + +#ifndef NDEBUG + /// vector recording the type of data we pushed into the stack. + std::vector<PrimType> ItemTypes; + + template <typename T> static constexpr PrimType toPrimType() { + if constexpr (std::is_same_v<T, Pointer>) + return PT_Ptr; + else if constexpr (std::is_same_v<T, bool> || + std::is_same_v<T, Boolean>) + return PT_Bool; + else if constexpr (std::is_same_v<T, int8_t> || + std::is_same_v<T, Integral<8, true>>) + return PT_Sint8; + else if constexpr (std::is_same_v<T, uint8_t> || + std::is_same_v<T, Integral<8, false>>) + return PT_Uint8; + else if constexpr (std::is_same_v<T, int16_t> || + std::is_same_v<T, Integral<16, true>>) + return PT_Sint16; + else if constexpr (std::is_same_v<T, uint16_t> || + std::is_same_v<T, Integral<16, false>>) + return PT_Uint16; + else if constexpr (std::is_same_v<T, int32_t> || + std::is_same_v<T, Integral<32, true>>) + return PT_Sint32; + else if constexpr (std::is_same_v<T, uint32_t> || + std::is_same_v<T, Integral<32, false>>) + return PT_Uint32; + else if constexpr (std::is_same_v<T, int64_t> || + std::is_same_v<T, Integral<64, true>>) + return PT_Sint64; + else if constexpr (std::is_same_v<T, uint64_t> || + std::is_same_v<T, Integral<64, false>>) + return PT_Uint64; + else if constexpr (std::is_same_v<T, Floating>) + return PT_Float; + else if constexpr (std::is_same_v<T, FunctionPointer>) + return PT_FnPtr; + else if constexpr (std::is_same_v<T, IntegralAP<true>>) + return PT_IntAP; + else if constexpr (std::is_same_v<T, IntegralAP<false>>) + return PT_IntAP; + else if constexpr (std::is_same_v<T, MemberPointer>) + return PT_MemberPtr; + + llvm_unreachable("unknown type push()'ed into InterpStack"); + } +#endif }; } // namespace interp diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp index 25684f3c0939..4ea05305540e 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp @@ -7,24 +7,17 @@ //===----------------------------------------------------------------------===// #include "InterpState.h" -#include <limits> -#include "Function.h" #include "InterpFrame.h" #include "InterpStack.h" -#include "Opcode.h" -#include "PrimType.h" #include "Program.h" #include 
"State.h" using namespace clang; using namespace clang::interp; -using APSInt = llvm::APSInt; - InterpState::InterpState(State &Parent, Program &P, InterpStack &Stk, Context &Ctx, SourceMapper *M) - : Parent(Parent), M(M), P(P), Stk(Stk), Ctx(Ctx), Current(nullptr), - CallStackDepth(Parent.getCallStackDepth() + 1) {} + : Parent(Parent), M(M), P(P), Stk(Stk), Ctx(Ctx), Current(nullptr) {} InterpState::~InterpState() { while (Current) { @@ -35,17 +28,27 @@ InterpState::~InterpState() { while (DeadBlocks) { DeadBlock *Next = DeadBlocks->Next; - free(DeadBlocks); + std::free(DeadBlocks); DeadBlocks = Next; } } +void InterpState::cleanup() { + // As a last resort, make sure all pointers still pointing to a dead block + // don't point to it anymore. + for (DeadBlock *DB = DeadBlocks; DB; DB = DB->Next) { + for (Pointer *P = DB->B.Pointers; P; P = P->Next) { + P->PointeeStorage.BS.Pointee = nullptr; + } + } + + Alloc.cleanup(); +} + Frame *InterpState::getCurrentFrame() { - if (Current && Current->Caller) { + if (Current && Current->Caller) return Current; - } else { - return Parent.getCurrentFrame(); - } + return Parent.getCurrentFrame(); } bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) { @@ -55,20 +58,45 @@ bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) { } void InterpState::deallocate(Block *B) { - Descriptor *Desc = B->getDescriptor(); + assert(B); + const Descriptor *Desc = B->getDescriptor(); + assert(Desc); + if (B->hasPointers()) { size_t Size = B->getSize(); // Allocate a new block, transferring over pointers. - char *Memory = reinterpret_cast<char *>(malloc(sizeof(DeadBlock) + Size)); + char *Memory = + reinterpret_cast<char *>(std::malloc(sizeof(DeadBlock) + Size)); auto *D = new (Memory) DeadBlock(DeadBlocks, B); + std::memset(D->B.rawData(), 0, D->B.getSize()); - // Move data from one block to another. - if (Desc->MoveFn) + // Move data and metadata from the old block to the new (dead)block. + if (B->IsInitialized && Desc->MoveFn) { Desc->MoveFn(B, B->data(), D->data(), Desc); - } else { - // Free storage, if necessary. - if (Desc->DtorFn) - Desc->DtorFn(B, B->data(), Desc); + if (Desc->getMetadataSize() > 0) + std::memcpy(D->rawData(), B->rawData(), Desc->getMetadataSize()); + } + D->B.IsInitialized = B->IsInitialized; + + // We moved the contents over to the DeadBlock. 
+ B->IsInitialized = false; + } else if (B->IsInitialized) { + B->invokeDtor(); + } +} + +bool InterpState::maybeDiagnoseDanglingAllocations() { + bool NoAllocationsLeft = (Alloc.getNumAllocations() == 0); + + if (!checkingPotentialConstantExpression()) { + for (const auto &It : Alloc.allocation_sites()) { + assert(It.second.size() > 0); + + const Expr *Source = It.first; + CCEDiag(Source->getExprLoc(), diag::note_constexpr_memory_leak) + << (It.second.size() - 1) << Source->getSourceRange(); + } } + return NoAllocationsLeft; } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h index c2209bbcbb92..61ee54331c65 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h @@ -14,7 +14,9 @@ #define LLVM_CLANG_AST_INTERP_INTERPSTATE_H #include "Context.h" +#include "DynamicAllocator.h" #include "Function.h" +#include "InterpFrame.h" #include "InterpStack.h" #include "State.h" #include "clang/AST/APValue.h" @@ -38,15 +40,22 @@ public: ~InterpState(); + void cleanup(); + + InterpState(const InterpState &) = delete; + InterpState &operator=(const InterpState &) = delete; + // Stack frame accessors. Frame *getSplitFrame() { return Parent.getCurrentFrame(); } Frame *getCurrentFrame() override; - unsigned getCallStackDepth() override { return CallStackDepth; } + unsigned getCallStackDepth() override { + return Current ? (Current->getDepth() + 1) : 1; + } const Frame *getBottomFrame() const override { return Parent.getBottomFrame(); } - // Acces objects from the walker context. + // Access objects from the walker context. Expr::EvalStatus &getEvalStatus() const override { return Parent.getEvalStatus(); } @@ -65,6 +74,7 @@ public: bool noteUndefinedBehavior() override { return Parent.noteUndefinedBehavior(); } + bool inConstantContext() const { return Parent.InConstantContext; } bool hasActiveDiagnostic() override { return Parent.hasActiveDiagnostic(); } void setActiveDiagnostic(bool Flag) override { Parent.setActiveDiagnostic(Flag); @@ -81,17 +91,35 @@ public: void deallocate(Block *B); /// Delegates source mapping to the mapper. - SourceInfo getSource(Function *F, CodePtr PC) const override { - return M ? M->getSource(F, PC) : F->getSource(PC); + SourceInfo getSource(const Function *F, CodePtr PC) const override { + if (M) + return M->getSource(F, PC); + + assert(F && "Function cannot be null"); + return F->getSource(PC); } + Context &getContext() const { return Ctx; } + + void setEvalLocation(SourceLocation SL) { this->EvalLocation = SL; } + + DynamicAllocator &getAllocator() { return Alloc; } + + /// Diagnose any dynamic allocations that haven't been freed yet. + /// Will return \c false if there were any allocations to diagnose, + /// \c true otherwise. + bool maybeDiagnoseDanglingAllocations(); + private: + friend class EvaluationResult; /// AST Walker state. State &Parent; /// Dead block chain. DeadBlock *DeadBlocks = nullptr; /// Reference to the offset-source mapping. SourceMapper *M; + /// Allocator used for dynamic allocations performed via the program. + DynamicAllocator Alloc; public: /// Reference to the module containing all bytecode. @@ -102,8 +130,14 @@ public: Context &Ctx; /// The current frame. InterpFrame *Current = nullptr; - /// Call stack depth. - unsigned CallStackDepth; + /// Source location of the evaluating expression + SourceLocation EvalLocation; + /// Declaration we're initializing/evaluting, if any. 
+ const VarDecl *EvaluatingDecl = nullptr; + + llvm::SmallVector< + std::pair<const Expr *, const LifetimeExtendedTemporaryDecl *>> + SeenGlobalTemporaries; }; } // namespace interp diff --git a/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.cpp b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.cpp new file mode 100644 index 000000000000..0c1b6edc5f7e --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.cpp @@ -0,0 +1,76 @@ +//===------------------------- MemberPointer.cpp ----------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "MemberPointer.h" +#include "Context.h" +#include "FunctionPointer.h" +#include "Program.h" +#include "Record.h" + +namespace clang { +namespace interp { + +std::optional<Pointer> MemberPointer::toPointer(const Context &Ctx) const { + if (!Dcl || isa<FunctionDecl>(Dcl)) + return Base; + const FieldDecl *FD = cast<FieldDecl>(Dcl); + assert(FD); + + if (!Base.isBlockPointer()) + return std::nullopt; + + Pointer CastedBase = + (PtrOffset < 0 ? Base.atField(-PtrOffset) : Base.atFieldSub(PtrOffset)); + + const Record *BaseRecord = CastedBase.getRecord(); + if (!BaseRecord) + return std::nullopt; + + assert(BaseRecord); + if (FD->getParent() == BaseRecord->getDecl()) + return CastedBase.atField(BaseRecord->getField(FD)->Offset); + + const RecordDecl *FieldParent = FD->getParent(); + const Record *FieldRecord = Ctx.getRecord(FieldParent); + + unsigned Offset = 0; + Offset += FieldRecord->getField(FD)->Offset; + Offset += CastedBase.block()->getDescriptor()->getMetadataSize(); + + if (Offset > CastedBase.block()->getSize()) + return std::nullopt; + + if (const RecordDecl *BaseDecl = Base.getDeclPtr().getRecord()->getDecl(); + BaseDecl != FieldParent) + Offset += Ctx.collectBaseOffset(FieldParent, BaseDecl); + + if (Offset > CastedBase.block()->getSize()) + return std::nullopt; + + assert(Offset <= CastedBase.block()->getSize()); + return Pointer(const_cast<Block *>(Base.block()), Offset, Offset); +} + +FunctionPointer MemberPointer::toFunctionPointer(const Context &Ctx) const { + return FunctionPointer(Ctx.getProgram().getFunction(cast<FunctionDecl>(Dcl))); +} + +APValue MemberPointer::toAPValue(const ASTContext &ASTCtx) const { + if (isZero()) + return APValue(static_cast<ValueDecl *>(nullptr), /*IsDerivedMember=*/false, + /*Path=*/{}); + + if (hasBase()) + return Base.toAPValue(ASTCtx); + + return APValue(cast<ValueDecl>(getDecl()), /*IsDerivedMember=*/false, + /*Path=*/{}); +} + +} // namespace interp +} // namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.h b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.h new file mode 100644 index 000000000000..2b3be124db42 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/MemberPointer.h @@ -0,0 +1,112 @@ +//===------------------------- MemberPointer.h ------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_INTERP_MEMBER_POINTER_H +#define LLVM_CLANG_AST_INTERP_MEMBER_POINTER_H + +#include "Pointer.h" +#include <optional> + +namespace clang { +class ASTContext; +namespace interp { + +class Context; +class FunctionPointer; + +class MemberPointer final { +private: + Pointer Base; + const Decl *Dcl = nullptr; + int32_t PtrOffset = 0; + + MemberPointer(Pointer Base, const Decl *Dcl, int32_t PtrOffset) + : Base(Base), Dcl(Dcl), PtrOffset(PtrOffset) {} + +public: + MemberPointer() = default; + MemberPointer(Pointer Base, const Decl *Dcl) : Base(Base), Dcl(Dcl) {} + MemberPointer(uint32_t Address, const Descriptor *D) { + // We only reach this for Address == 0, when creating a null member pointer. + assert(Address == 0); + } + + MemberPointer(const Decl *D) : Dcl(D) { + assert((isa<FieldDecl, IndirectFieldDecl, CXXMethodDecl>(D))); + } + + uint64_t getIntegerRepresentation() const { + assert( + false && + "getIntegerRepresentation() shouldn't be reachable for MemberPointers"); + return 17; + } + + std::optional<Pointer> toPointer(const Context &Ctx) const; + + FunctionPointer toFunctionPointer(const Context &Ctx) const; + + Pointer getBase() const { + if (PtrOffset < 0) + return Base.atField(-PtrOffset); + return Base.atFieldSub(PtrOffset); + } + bool isMemberFunctionPointer() const { + return isa_and_nonnull<CXXMethodDecl>(Dcl); + } + const CXXMethodDecl *getMemberFunction() const { + return dyn_cast_if_present<CXXMethodDecl>(Dcl); + } + const FieldDecl *getField() const { + return dyn_cast_if_present<FieldDecl>(Dcl); + } + + bool hasDecl() const { return Dcl; } + const Decl *getDecl() const { return Dcl; } + + MemberPointer atInstanceBase(unsigned Offset) const { + if (Base.isZero()) + return MemberPointer(Base, Dcl, Offset); + return MemberPointer(this->Base, Dcl, Offset + PtrOffset); + } + + MemberPointer takeInstance(Pointer Instance) const { + assert(this->Base.isZero()); + return MemberPointer(Instance, this->Dcl, this->PtrOffset); + } + + APValue toAPValue(const ASTContext &) const; + + bool isZero() const { return Base.isZero() && !Dcl; } + bool hasBase() const { return !Base.isZero(); } + + void print(llvm::raw_ostream &OS) const { + OS << "MemberPtr(" << Base << " " << (const void *)Dcl << " + " << PtrOffset + << ")"; + } + + std::string toDiagnosticString(const ASTContext &Ctx) const { + return "FIXME"; + } + + ComparisonCategoryResult compare(const MemberPointer &RHS) const { + if (this->Dcl == RHS.Dcl) + return ComparisonCategoryResult::Equal; + return ComparisonCategoryResult::Unordered; + } +}; + +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, MemberPointer FP) { + FP.print(OS); + return OS; +} + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td index 4aba5f5cd83c..9f29fa927271 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td +++ b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td @@ -25,13 +25,18 @@ def Sint32 : Type; def Uint32 : Type; def Sint64 : Type; def Uint64 : Type; +def IntAP : Type; +def IntAPS : Type; +def Float : Type; def Ptr : Type; +def FnPtr : Type; +def MemberPtr : Type; //===----------------------------------------------------------------------===// // Types transferred to the interpreter. 
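(Illustrative only, not part of the diff.) The new MemberPointer value, together with the MemberPtr primitive type defined just above for Opcodes.td, models pointers to members in constant expressions; MemberPointer::toPointer() combines a data-member pointer with an object (its Base) and the base-class adjustment (PtrOffset) into an ordinary interpreter Pointer. A minimal C++17 example of the expressions involved:

struct A { int x; };
struct B : A { int y; };

constexpr int A::*pmA = &A::x;        // Dcl is the FieldDecl for 'x'
constexpr int B::*pmB = pmA;          // base-to-derived conversion adjusts the offset
constexpr int A::*pmNull = nullptr;   // null member pointer: isZero()

constexpr B b = {{1}, 2};             // aggregate with a base (C++17)
static_assert(b.*pmA == 1);           // .* applies the member pointer to an object
static_assert(b.*pmB == 1);
static_assert(pmNull == nullptr);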
//===----------------------------------------------------------------------===// -class ArgType { string Name = ?; } +class ArgType { string Name = ?; bit AsRef = false; } def ArgSint8 : ArgType { let Name = "int8_t"; } def ArgUint8 : ArgType { let Name = "uint8_t"; } def ArgSint16 : ArgType { let Name = "int16_t"; } @@ -40,45 +45,77 @@ def ArgSint32 : ArgType { let Name = "int32_t"; } def ArgUint32 : ArgType { let Name = "uint32_t"; } def ArgSint64 : ArgType { let Name = "int64_t"; } def ArgUint64 : ArgType { let Name = "uint64_t"; } +def ArgIntAP : ArgType { let Name = "IntegralAP<false>"; let AsRef = true; } +def ArgIntAPS : ArgType { let Name = "IntegralAP<true>"; let AsRef = true; } +def ArgFloat : ArgType { let Name = "Floating"; let AsRef = true; } def ArgBool : ArgType { let Name = "bool"; } -def ArgFunction : ArgType { let Name = "Function *"; } -def ArgRecord : ArgType { let Name = "Record *"; } - -def ArgSema : ArgType { let Name = "const fltSemantics *"; } - -def ArgExpr : ArgType { let Name = "const Expr *"; } -def ArgFloatingLiteral : ArgType { let Name = "const FloatingLiteral *"; } -def ArgCXXMethodDecl : ArgType { let Name = "const CXXMethodDecl *"; } -def ArgFunctionDecl : ArgType { let Name = "const FunctionDecl *"; } +def ArgFunction : ArgType { let Name = "const Function *"; } def ArgRecordDecl : ArgType { let Name = "const RecordDecl *"; } -def ArgCXXRecordDecl : ArgType { let Name = "const CXXRecordDecl *"; } -def ArgValueDecl : ArgType { let Name = "const ValueDecl *"; } def ArgRecordField : ArgType { let Name = "const Record::Field *"; } +def ArgFltSemantics : ArgType { let Name = "const llvm::fltSemantics *"; } +def ArgRoundingMode : ArgType { let Name = "llvm::RoundingMode"; } +def ArgLETD: ArgType { let Name = "const LifetimeExtendedTemporaryDecl *"; } +def ArgCastKind : ArgType { let Name = "CastKind"; } +def ArgCallExpr : ArgType { let Name = "const CallExpr *"; } +def ArgExpr : ArgType { let Name = "const Expr *"; } +def ArgOffsetOfExpr : ArgType { let Name = "const OffsetOfExpr *"; } +def ArgDeclRef : ArgType { let Name = "const DeclRefExpr *"; } +def ArgCCI : ArgType { let Name = "const ComparisonCategoryInfo *"; } +def ArgDecl : ArgType { let Name = "const Decl*"; } +def ArgVarDecl : ArgType { let Name = "const VarDecl*"; } +def ArgDesc : ArgType { let Name = "const Descriptor *"; } +def ArgPrimType : ArgType { let Name = "PrimType"; } +def ArgEnumDecl : ArgType { let Name = "const EnumDecl *"; } //===----------------------------------------------------------------------===// -// Classes of types intructions operate on. +// Classes of types instructions operate on. 
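(Illustrative only.) The new IntAP/IntAPS types and the by-reference argument kinds (ArgIntAP, ArgIntAPS, ArgFloat with AsRef) carry values that do not fit the fixed-width integral path: arbitrary-precision integers and floating-point constants evaluated under an explicit rounding mode. Roughly the constructs they back (hypothetical examples; the exact mapping of source types to interpreter types is an assumption here):

constexpr __int128 big = (__int128)1 << 100;   // wider than 64 bits
constexpr double third = 1.0 / 3.0;            // Floating, rounded per ArgRoundingMode
static_assert(big > 0);
static_assert(third > 0.333 && third < 0.334);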
//===----------------------------------------------------------------------===// class TypeClass { list<Type> Types; } -def AluTypeClass : TypeClass { +def IntegerTypeClass : TypeClass { + let Types = [Sint8, Uint8, Sint16, Uint16, Sint32, + Uint32, Sint64, Uint64, IntAP, IntAPS]; +} + +def FixedSizeIntegralTypeClass : TypeClass { let Types = [Sint8, Uint8, Sint16, Uint16, Sint32, Uint32, Sint64, Uint64, Bool]; } +def NumberTypeClass : TypeClass { + let Types = !listconcat(IntegerTypeClass.Types, [Float]); +} + +def FloatTypeClass : TypeClass { + let Types = [Float]; +} + +def AluTypeClass : TypeClass { + let Types = !listconcat(IntegerTypeClass.Types, [Bool]); +} + def PtrTypeClass : TypeClass { - let Types = [Ptr]; + let Types = [Ptr, FnPtr, MemberPtr]; +} + +def BoolTypeClass : TypeClass { + let Types = [Bool]; +} + +def NonPtrTypeClass : TypeClass { + let Types = !listconcat(IntegerTypeClass.Types, [Bool], [Float]); } def AllTypeClass : TypeClass { - let Types = !listconcat(AluTypeClass.Types, PtrTypeClass.Types); + let Types = !listconcat(AluTypeClass.Types, PtrTypeClass.Types, FloatTypeClass.Types); } def ComparableTypeClass : TypeClass { - let Types = !listconcat(AluTypeClass.Types, [Ptr]); + let Types = !listconcat(AluTypeClass.Types, [Ptr], [Float], [FnPtr]); } class SingletonTypeClass<Type Ty> : TypeClass { @@ -105,6 +142,15 @@ class AluOpcode : Opcode { let HasGroup = 1; } +class FloatOpcode : Opcode { + let Args = [ArgRoundingMode]; +} + +class IntegerOpcode : Opcode { + let Types = [IntegerTypeClass]; + let HasGroup = 1; +} + //===----------------------------------------------------------------------===// // Jump opcodes //===----------------------------------------------------------------------===// @@ -149,6 +195,33 @@ def RetValue : Opcode { // [] -> EXIT def NoRet : Opcode {} + +def Call : Opcode { + let Args = [ArgFunction, ArgUint32]; +} + +def CallVirt : Opcode { + let Args = [ArgFunction, ArgUint32]; +} + +def CallBI : Opcode { + let Args = [ArgFunction, ArgCallExpr]; +} + +def CallPtr : Opcode { + let Args = [ArgUint32, ArgCallExpr]; +} + +def CallVar : Opcode { + let Args = [ArgFunction, ArgUint32]; +} + +def OffsetOf : Opcode { + let Types = [IntegerTypeClass]; + let Args = [ArgOffsetOfExpr]; + let HasGroup = 1; +} + //===----------------------------------------------------------------------===// // Frame management //===----------------------------------------------------------------------===// @@ -178,65 +251,75 @@ def ConstSint32 : ConstOpcode<Sint32, ArgSint32>; def ConstUint32 : ConstOpcode<Uint32, ArgUint32>; def ConstSint64 : ConstOpcode<Sint64, ArgSint64>; def ConstUint64 : ConstOpcode<Uint64, ArgUint64>; +def ConstFloat : ConstOpcode<Float, ArgFloat>; +def constIntAP : ConstOpcode<IntAP, ArgIntAP>; +def constIntAPS : ConstOpcode<IntAPS, ArgIntAPS>; def ConstBool : ConstOpcode<Bool, ArgBool>; // [] -> [Integer] def Zero : Opcode { - let Types = [AluTypeClass]; + let Types = [FixedSizeIntegralTypeClass]; + let HasGroup = 1; +} + +def ZeroIntAP : Opcode { + let Args = [ArgUint32]; +} + +def ZeroIntAPS : Opcode { + let Args = [ArgUint32]; } // [] -> [Pointer] def Null : Opcode { let Types = [PtrTypeClass]; + let Args = [ArgDesc]; + let HasGroup = 1; } //===----------------------------------------------------------------------===// // Pointer generation //===----------------------------------------------------------------------===// +class OffsetOpcode : Opcode { + let Args = [ArgUint32]; +} // [] -> [Pointer] -def GetPtrLocal : Opcode { - // Offset of local. 
- let Args = [ArgUint32]; +def GetPtrLocal : OffsetOpcode { bit HasCustomEval = 1; } // [] -> [Pointer] -def GetPtrParam : Opcode { - // Offset of parameter. - let Args = [ArgUint32]; -} +def GetPtrParam : OffsetOpcode; // [] -> [Pointer] -def GetPtrGlobal : Opcode { - // Index of global. - let Args = [ArgUint32]; -} +def GetPtrGlobal : OffsetOpcode; // [Pointer] -> [Pointer] -def GetPtrField : Opcode { - // Offset of field. - let Args = [ArgUint32]; -} +def GetPtrField : OffsetOpcode; +def GetPtrFieldPop : OffsetOpcode; // [Pointer] -> [Pointer] -def GetPtrActiveField : Opcode { - // Offset of field. - let Args = [ArgUint32]; -} +def GetPtrActiveField : OffsetOpcode; // [] -> [Pointer] -def GetPtrActiveThisField : Opcode { - // Offset of field. - let Args = [ArgUint32]; -} +def GetPtrActiveThisField : OffsetOpcode; // [] -> [Pointer] -def GetPtrThisField : Opcode { - // Offset of field. - let Args = [ArgUint32]; -} +def GetPtrThisField : OffsetOpcode; +// [Pointer] -> [Pointer] +def GetPtrBase : OffsetOpcode; // [Pointer] -> [Pointer] -def GetPtrBase : Opcode { +def GetPtrBasePop : OffsetOpcode; +def GetMemberPtrBasePop : Opcode { // Offset of field, which is a base. + let Args = [ArgSint32]; +} + + +def FinishInitPop : Opcode; +def FinishInit : Opcode; + +def GetPtrDerivedPop : Opcode { let Args = [ArgUint32]; } + // [Pointer] -> [Pointer] -def GetPtrVirtBase : Opcode { +def GetPtrVirtBasePop : Opcode { // RecordDecl of base class. let Args = [ArgRecordDecl]; } @@ -253,10 +336,34 @@ def GetPtrThisVirtBase : Opcode { // [] -> [Pointer] def This : Opcode; +// [] -> [Pointer] +def RVOPtr : Opcode; + // [Pointer] -> [Pointer] def NarrowPtr : Opcode; // [Pointer] -> [Pointer] def ExpandPtr : Opcode; +// [Pointer, Offset] -> [Pointer] +def ArrayElemPtr : AluOpcode; +def ArrayElemPtrPop : AluOpcode; + +def ArrayElemPop : Opcode { + let Args = [ArgUint32]; + let Types = [AllTypeClass]; + let HasGroup = 1; +} + +def ArrayElem : Opcode { + let Args = [ArgUint32]; + let Types = [AllTypeClass]; + let HasGroup = 1; +} + +def CopyArray : Opcode { + let Args = [ArgUint32, ArgUint32, ArgUint32]; + let Types = [AllTypeClass]; + let HasGroup = 1; +} //===----------------------------------------------------------------------===// // Direct field accessors @@ -279,11 +386,30 @@ def GetLocal : AccessOpcode { let HasCustomEval = 1; } // [] -> [Pointer] def SetLocal : AccessOpcode { let HasCustomEval = 1; } +def CheckDecl : Opcode { + let Args = [ArgVarDecl]; +} + +def CheckEnumValue : Opcode { + let Args = [ArgEnumDecl]; + let Types = [FixedSizeIntegralTypeClass]; + let HasGroup = 1; +} + // [] -> [Value] def GetGlobal : AccessOpcode; +def GetGlobalUnchecked : AccessOpcode; // [Value] -> [] def InitGlobal : AccessOpcode; // [Value] -> [] +def InitGlobalTemp : AccessOpcode { + let Args = [ArgUint32, ArgLETD]; +} +// [Pointer] -> [Pointer] +def InitGlobalTempComp : Opcode { + let Args = [ArgLETD]; +} +// [Value] -> [] def SetGlobal : AccessOpcode; // [] -> [Value] @@ -308,7 +434,11 @@ def InitThisField : AccessOpcode; // [Value] -> [] def InitThisFieldActive : AccessOpcode; // [Value] -> [] -def InitThisBitField : BitFieldOpcode; +def InitThisBitField : Opcode { + let Types = [AluTypeClass]; + let Args = [ArgRecordField, ArgUint32]; + let HasGroup = 1; +} // [Pointer, Value] -> [] def InitField : AccessOpcode; // [Pointer, Value] -> [] @@ -351,6 +481,7 @@ def StoreBitField : StoreBitFieldOpcode {} def StoreBitFieldPop : StoreBitFieldOpcode {} // [Pointer, Value] -> [] +def Init : StoreOpcode {} def InitPop : 
StoreOpcode {} // [Pointer, Value] -> [Pointer] def InitElem : Opcode { @@ -374,14 +505,179 @@ def AddOffset : AluOpcode; // [Pointer, Integral] -> [Pointer] def SubOffset : AluOpcode; +// [Pointer, Pointer] -> [Integral] +def SubPtr : Opcode { + let Types = [IntegerTypeClass]; + let HasGroup = 1; +} + +// [Pointer] -> [Pointer] +def IncPtr : Opcode; +// [Pointer] -> [Pointer] +def DecPtr : Opcode; + +//===----------------------------------------------------------------------===// +// Function pointers. +//===----------------------------------------------------------------------===// +def GetFnPtr : Opcode { + let Args = [ArgFunction]; +} + +def GetIntPtr : Opcode { + let Types = [AluTypeClass]; + let Args = [ArgDesc]; + let HasGroup = 1; +} + //===----------------------------------------------------------------------===// // Binary operators. //===----------------------------------------------------------------------===// // [Real, Real] -> [Real] -def Sub : AluOpcode; -def Add : AluOpcode; -def Mul : AluOpcode; +def Add : AluOpcode; +def Addf : FloatOpcode; +def Sub : AluOpcode; +def Subf : FloatOpcode; +def Mul : AluOpcode; +def Mulf : FloatOpcode; +def Mulc : Opcode { + let Types = [NumberTypeClass]; + let HasGroup = 1; +} +def Rem : IntegerOpcode; +def Div : IntegerOpcode; +def Divf : FloatOpcode; +def Divc : Opcode { + let Types = [NumberTypeClass]; + let HasGroup = 1; +} + +def BitAnd : IntegerOpcode; +def BitOr : IntegerOpcode; +def BitXor : IntegerOpcode; + +def Shl : Opcode { + let Types = [IntegerTypeClass, IntegerTypeClass]; + let HasGroup = 1; +} + +def Shr : Opcode { + let Types = [IntegerTypeClass, IntegerTypeClass]; + let HasGroup = 1; +} + +//===----------------------------------------------------------------------===// +// Unary operators. +//===----------------------------------------------------------------------===// + +// [Real] -> [Real] +def Inv: Opcode { + let Types = [BoolTypeClass]; + let HasGroup = 1; +} + +// Increment and decrement. +def Inc: AluOpcode; +def IncPop : AluOpcode; +def Dec: AluOpcode; +def DecPop: AluOpcode; + +// Float increment and decrement. +def Incf: FloatOpcode; +def IncfPop : FloatOpcode; +def Decf: FloatOpcode; +def DecfPop : FloatOpcode; + +// [Real] -> [Real] +def Neg: Opcode { + let Types = [NonPtrTypeClass]; + let HasGroup = 1; +} + +// [Real] -> [Real] +def Comp: Opcode { + let Types = [IntegerTypeClass]; + let HasGroup = 1; +} + +//===----------------------------------------------------------------------===// +// Cast, CastFP. 
+//===----------------------------------------------------------------------===// + +def FromCastTypeClass : TypeClass { + let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, Bool, IntAP, IntAPS]; +} + +def ToCastTypeClass : TypeClass { + let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, Bool]; +} + +def Cast: Opcode { + let Types = [FromCastTypeClass, ToCastTypeClass]; + let HasGroup = 1; +} + +def CastFP : Opcode { + let Args = [ArgFltSemantics, ArgRoundingMode]; +} + +def FixedSizeIntegralTypes : TypeClass { + let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, Bool]; +} + +def CastAP : Opcode { + let Types = [AluTypeClass]; + let Args = [ArgUint32]; + let HasGroup = 1; +} + +def CastAPS : Opcode { + let Types = [AluTypeClass]; + let Args = [ArgUint32]; + let HasGroup = 1; +} + +// Cast an integer to a floating type +def CastIntegralFloating : Opcode { + let Types = [AluTypeClass]; + let Args = [ArgFltSemantics, ArgRoundingMode]; + let HasGroup = 1; +} + +// Cast a floating to an integer type +def CastFloatingIntegral : Opcode { + let Types = [FixedSizeIntegralTypes]; + let Args = []; + let HasGroup = 1; +} + +def CastFloatingIntegralAP : Opcode { + let Args = [ArgUint32]; +} + +def CastFloatingIntegralAPS : Opcode { + let Args = [ArgUint32]; +} + +def CastPointerIntegral : Opcode { + let Types = [FixedSizeIntegralTypeClass]; + let HasGroup = 1; +} +def CastPointerIntegralAP : Opcode { + let Args = [ArgUint32]; +} +def CastPointerIntegralAPS : Opcode { + let Args = [ArgUint32]; +} +def PtrPtrCast : Opcode { + let Args = [ArgBool]; + +} + +def DecayPtr : Opcode { + let Types = [PtrTypeClass, PtrTypeClass]; + let HasGroup = 1; +} //===----------------------------------------------------------------------===// // Comparison opcodes. @@ -400,6 +696,10 @@ class ComparisonOpcode : Opcode { let HasGroup = 1; } +def CMP3 : ComparisonOpcode { + let Args = [ArgCCI]; +} + def LT : ComparisonOpcode; def LE : ComparisonOpcode; def GT : ComparisonOpcode; @@ -420,3 +720,61 @@ def Dup : Opcode { let Types = [AllTypeClass]; let HasGroup = 1; } + +// [] -> [] +def Invalid : Opcode {} +def Unsupported : Opcode {} +def Error : Opcode {} +def InvalidCast : Opcode { + let Args = [ArgCastKind]; +} + +def InvalidDeclRef : Opcode { + let Args = [ArgDeclRef]; +} + +def SizelessVectorElementSize : Opcode; + +def Assume : Opcode; + +def ArrayDecay : Opcode; + +def CheckNonNullArg : Opcode { + let Types = [PtrTypeClass]; + let HasGroup = 1; +} + +def Memcpy : Opcode; + +def ToMemberPtr : Opcode; +def CastMemberPtrPtr : Opcode; +def GetMemberPtr : Opcode { + let Args = [ArgDecl]; +} +def GetMemberPtrBase : Opcode; +def GetMemberPtrDecl : Opcode; + +//===----------------------------------------------------------------------===// +// Debugging. 
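(Illustrative only, not part of the diff.) The cast opcodes above correspond to ordinary conversions in constant expressions, and CMP3 carries a ComparisonCategoryInfo so the built-in three-way comparison can produce the right ordering category. For example:

#include <compare>

constexpr int    i = static_cast<int>(3.75);   // float -> int truncates to 3
constexpr double d = 7;                        // int -> float
constexpr bool   b = 0.25;                     // non-zero float -> true
static_assert(i == 3 && d == 7.0 && b);

static_assert((1 <=> 2) < 0);                  // built-in <=> in a constant expression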
+//===----------------------------------------------------------------------===// +def Dump : Opcode; + +def Alloc : Opcode { + let Args = [ArgDesc]; +} + +def AllocN : Opcode { + let Types = [IntegerTypeClass]; + let Args = [ArgPrimType, ArgExpr, ArgBool]; + let HasGroup = 1; +} + +def AllocCN : Opcode { + let Types = [IntegerTypeClass]; + let Args = [ArgDesc, ArgBool]; + let HasGroup = 1; +} + +def Free : Opcode { + let Args = [ArgBool]; +} diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp index ef2638e2a36b..29579f5db40b 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp @@ -7,176 +7,353 @@ //===----------------------------------------------------------------------===// #include "Pointer.h" +#include "Boolean.h" +#include "Context.h" +#include "Floating.h" #include "Function.h" +#include "Integral.h" #include "InterpBlock.h" +#include "MemberPointer.h" #include "PrimType.h" +#include "Record.h" +#include "clang/AST/RecordLayout.h" using namespace clang; using namespace clang::interp; -Pointer::Pointer(Block *Pointee) : Pointer(Pointee, 0, 0) {} +Pointer::Pointer(Block *Pointee) + : Pointer(Pointee, Pointee->getDescriptor()->getMetadataSize(), + Pointee->getDescriptor()->getMetadataSize()) {} -Pointer::Pointer(const Pointer &P) : Pointer(P.Pointee, P.Base, P.Offset) {} +Pointer::Pointer(Block *Pointee, uint64_t BaseAndOffset) + : Pointer(Pointee, BaseAndOffset, BaseAndOffset) {} -Pointer::Pointer(Pointer &&P) - : Pointee(P.Pointee), Base(P.Base), Offset(P.Offset) { - if (Pointee) - Pointee->movePointer(&P, this); +Pointer::Pointer(const Pointer &P) + : Offset(P.Offset), PointeeStorage(P.PointeeStorage), + StorageKind(P.StorageKind) { + + if (isBlockPointer() && PointeeStorage.BS.Pointee) + PointeeStorage.BS.Pointee->addPointer(this); } -Pointer::Pointer(Block *Pointee, unsigned Base, unsigned Offset) - : Pointee(Pointee), Base(Base), Offset(Offset) { +Pointer::Pointer(Block *Pointee, unsigned Base, uint64_t Offset) + : Offset(Offset), StorageKind(Storage::Block) { assert((Base == RootPtrMark || Base % alignof(void *) == 0) && "wrong base"); + + PointeeStorage.BS = {Pointee, Base}; + if (Pointee) Pointee->addPointer(this); } +Pointer::Pointer(Pointer &&P) + : Offset(P.Offset), PointeeStorage(P.PointeeStorage), + StorageKind(P.StorageKind) { + + if (StorageKind == Storage::Block && PointeeStorage.BS.Pointee) + PointeeStorage.BS.Pointee->replacePointer(&P, this); +} + Pointer::~Pointer() { - if (Pointee) { + if (isIntegralPointer()) + return; + + if (Block *Pointee = PointeeStorage.BS.Pointee) { Pointee->removePointer(this); Pointee->cleanup(); } } void Pointer::operator=(const Pointer &P) { - Block *Old = Pointee; - - if (Pointee) - Pointee->removePointer(this); + // If the current storage type is Block, we need to remove + // this pointer from the block. 
+ bool WasBlockPointer = isBlockPointer(); + if (StorageKind == Storage::Block) { + Block *Old = PointeeStorage.BS.Pointee; + if (WasBlockPointer && Old) { + PointeeStorage.BS.Pointee->removePointer(this); + Old->cleanup(); + } + } + StorageKind = P.StorageKind; Offset = P.Offset; - Base = P.Base; - Pointee = P.Pointee; - if (Pointee) - Pointee->addPointer(this); + if (P.isBlockPointer()) { + PointeeStorage.BS = P.PointeeStorage.BS; + PointeeStorage.BS.Pointee = P.PointeeStorage.BS.Pointee; - if (Old) - Old->cleanup(); + if (PointeeStorage.BS.Pointee) + PointeeStorage.BS.Pointee->addPointer(this); + } else if (P.isIntegralPointer()) { + PointeeStorage.Int = P.PointeeStorage.Int; + } else { + assert(false && "Unhandled storage kind"); + } } void Pointer::operator=(Pointer &&P) { - Block *Old = Pointee; - - if (Pointee) - Pointee->removePointer(this); + // If the current storage type is Block, we need to remove + // this pointer from the block. + bool WasBlockPointer = isBlockPointer(); + if (StorageKind == Storage::Block) { + Block *Old = PointeeStorage.BS.Pointee; + if (WasBlockPointer && Old) { + PointeeStorage.BS.Pointee->removePointer(this); + Old->cleanup(); + } + } + StorageKind = P.StorageKind; Offset = P.Offset; - Base = P.Base; - Pointee = P.Pointee; - if (Pointee) - Pointee->movePointer(&P, this); + if (P.isBlockPointer()) { + PointeeStorage.BS = P.PointeeStorage.BS; + PointeeStorage.BS.Pointee = P.PointeeStorage.BS.Pointee; - if (Old) - Old->cleanup(); + if (PointeeStorage.BS.Pointee) + PointeeStorage.BS.Pointee->addPointer(this); + } else if (P.isIntegralPointer()) { + PointeeStorage.Int = P.PointeeStorage.Int; + } else { + assert(false && "Unhandled storage kind"); + } } -APValue Pointer::toAPValue() const { - APValue::LValueBase Base; +APValue Pointer::toAPValue(const ASTContext &ASTCtx) const { llvm::SmallVector<APValue::LValuePathEntry, 5> Path; - CharUnits Offset; - bool IsNullPtr; - bool IsOnePastEnd; - - if (isZero()) { - Base = static_cast<const Expr *>(nullptr); - IsNullPtr = true; - IsOnePastEnd = false; - Offset = CharUnits::Zero(); - } else { - // Build the lvalue base from the block. - Descriptor *Desc = getDeclDesc(); - if (auto *VD = Desc->asValueDecl()) - Base = VD; - else if (auto *E = Desc->asExpr()) - Base = E; - else - llvm_unreachable("Invalid allocation type"); - // Not a null pointer. - IsNullPtr = false; + if (isZero()) + return APValue(static_cast<const Expr *>(nullptr), CharUnits::Zero(), Path, + /*IsOnePastEnd=*/false, /*IsNullPtr=*/true); + if (isIntegralPointer()) + return APValue(static_cast<const Expr *>(nullptr), + CharUnits::fromQuantity(asIntPointer().Value + this->Offset), + Path, + /*IsOnePastEnd=*/false, /*IsNullPtr=*/false); + + // Build the lvalue base from the block. + const Descriptor *Desc = getDeclDesc(); + APValue::LValueBase Base; + if (const auto *VD = Desc->asValueDecl()) + Base = VD; + else if (const auto *E = Desc->asExpr()) + Base = E; + else + llvm_unreachable("Invalid allocation type"); + + if (isUnknownSizeArray() || Desc->asExpr()) + return APValue(Base, CharUnits::Zero(), Path, + /*IsOnePastEnd=*/isOnePastEnd(), /*IsNullPtr=*/false); + + CharUnits Offset = CharUnits::Zero(); - if (isUnknownSizeArray()) { - IsOnePastEnd = false; - Offset = CharUnits::Zero(); + auto getFieldOffset = [&](const FieldDecl *FD) -> CharUnits { + // This shouldn't happen, but if it does, don't crash inside + // getASTRecordLayout. 
+ if (FD->getParent()->isInvalidDecl()) + return CharUnits::Zero(); + const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(FD->getParent()); + unsigned FieldIndex = FD->getFieldIndex(); + return ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex)); + }; + + // Build the path into the object. + Pointer Ptr = *this; + while (Ptr.isField() || Ptr.isArrayElement()) { + if (Ptr.isArrayRoot()) { + Path.push_back(APValue::LValuePathEntry( + {Ptr.getFieldDesc()->asDecl(), /*IsVirtual=*/false})); + + if (const auto *FD = dyn_cast<FieldDecl>(Ptr.getFieldDesc()->asDecl())) + Offset += getFieldOffset(FD); + + Ptr = Ptr.getBase(); + } else if (Ptr.isArrayElement()) { + unsigned Index; + if (Ptr.isOnePastEnd()) + Index = Ptr.getArray().getNumElems(); + else + Index = Ptr.getIndex(); + + Offset += (Index * ASTCtx.getTypeSizeInChars(Ptr.getType())); + Path.push_back(APValue::LValuePathEntry::ArrayIndex(Index)); + Ptr = Ptr.getArray(); } else { - // TODO: compute the offset into the object. - Offset = CharUnits::Zero(); - - // Build the path into the object. - Pointer Ptr = *this; - while (Ptr.isField()) { - if (Ptr.isArrayElement()) { - Path.push_back(APValue::LValuePathEntry::ArrayIndex(Ptr.getIndex())); - Ptr = Ptr.getArray(); + bool IsVirtual = false; + + // Create a path entry for the field. + const Descriptor *Desc = Ptr.getFieldDesc(); + if (const auto *BaseOrMember = Desc->asDecl()) { + if (const auto *FD = dyn_cast<FieldDecl>(BaseOrMember)) { + Ptr = Ptr.getBase(); + Offset += getFieldOffset(FD); + } else if (const auto *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) { + IsVirtual = Ptr.isVirtualBaseClass(); + Ptr = Ptr.getBase(); + const Record *BaseRecord = Ptr.getRecord(); + + const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout( + cast<CXXRecordDecl>(BaseRecord->getDecl())); + if (IsVirtual) + Offset += Layout.getVBaseClassOffset(RD); + else + Offset += Layout.getBaseClassOffset(RD); + } else { - // TODO: figure out if base is virtual - bool IsVirtual = false; - - // Create a path entry for the field. - Descriptor *Desc = Ptr.getFieldDesc(); - if (auto *BaseOrMember = Desc->asDecl()) { - Path.push_back(APValue::LValuePathEntry({BaseOrMember, IsVirtual})); - Ptr = Ptr.getBase(); - continue; - } - llvm_unreachable("Invalid field type"); + Ptr = Ptr.getBase(); } + Path.push_back(APValue::LValuePathEntry({BaseOrMember, IsVirtual})); + continue; } - - IsOnePastEnd = isOnePastEnd(); + llvm_unreachable("Invalid field type"); } } - return APValue(Base, Offset, Path, IsOnePastEnd, IsNullPtr); + // FIXME(perf): We compute the lvalue path above, but we can't supply it + // for dummy pointers (that causes crashes later in CheckConstantExpression). + if (isDummy()) + Path.clear(); + + // We assemble the LValuePath starting from the innermost pointer to the + // outermost one. SO in a.b.c, the first element in Path will refer to + // the field 'c', while later code expects it to refer to 'a'. + // Just invert the order of the elements. 
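(Illustration, not part of the diff.) For a nested member lvalue the loop above walks the Pointer chain from the innermost field outward, so the LValuePath entries are collected innermost-first and reversed just below into the outermost-first order APValue expects. With a hypothetical object shape:

struct Inner { int val; };
struct Outer { Inner in; };
constexpr Outer o = {{42}};
constexpr const int *p = &o.in.val;
// APValue for 'p': base is 'o', the offset is the byte offset of in.val,
// and the lvalue path is [in, val] -- outermost first, hence the std::reverse.
static_assert(*p == 42);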
+ std::reverse(Path.begin(), Path.end()); + + return APValue(Base, Offset, Path, /*IsOnePastEnd=*/isOnePastEnd(), + /*IsNullPtr=*/false); +} + +void Pointer::print(llvm::raw_ostream &OS) const { + OS << PointeeStorage.BS.Pointee << " ("; + if (isBlockPointer()) { + const Block *B = PointeeStorage.BS.Pointee; + OS << "Block) {"; + + if (isRoot()) + OS << "rootptr(" << PointeeStorage.BS.Base << "), "; + else + OS << PointeeStorage.BS.Base << ", "; + + if (isElementPastEnd()) + OS << "pastend, "; + else + OS << Offset << ", "; + + if (B) + OS << B->getSize(); + else + OS << "nullptr"; + } else { + OS << "Int) {"; + OS << PointeeStorage.Int.Value << ", " << PointeeStorage.Int.Desc; + } + OS << "}"; +} + +std::string Pointer::toDiagnosticString(const ASTContext &Ctx) const { + if (isZero()) + return "nullptr"; + + if (isIntegralPointer()) + return (Twine("&(") + Twine(asIntPointer().Value + Offset) + ")").str(); + + return toAPValue(Ctx).getAsString(Ctx, getType()); } bool Pointer::isInitialized() const { - assert(Pointee && "Cannot check if null pointer was initialized"); - Descriptor *Desc = getFieldDesc(); + if (isIntegralPointer()) + return true; + + if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor)) { + const GlobalInlineDescriptor &GD = + *reinterpret_cast<const GlobalInlineDescriptor *>(block()->rawData()); + return GD.InitState == GlobalInitState::Initialized; + } + + assert(PointeeStorage.BS.Pointee && + "Cannot check if null pointer was initialized"); + const Descriptor *Desc = getFieldDesc(); + assert(Desc); if (Desc->isPrimitiveArray()) { - if (Pointee->IsStatic) + if (isStatic() && PointeeStorage.BS.Base == 0) return true; - // Primitive array field are stored in a bitset. - InitMap *Map = getInitMap(); - if (!Map) + + InitMapPtr &IM = getInitMap(); + + if (!IM) return false; - if (Map == (InitMap *)-1) + + if (IM->first) return true; - return Map->isInitialized(getIndex()); - } else { - // Field has its bit in an inline descriptor. - return Base == 0 || getInlineDesc()->IsInitialized; + + return IM->second->isElementInitialized(getIndex()); } + + if (asBlockPointer().Base == 0) + return true; + + // Field has its bit in an inline descriptor. + return getInlineDesc()->IsInitialized; } void Pointer::initialize() const { - assert(Pointee && "Cannot initialize null pointer"); - Descriptor *Desc = getFieldDesc(); + if (isIntegralPointer()) + return; + + assert(PointeeStorage.BS.Pointee && "Cannot initialize null pointer"); + const Descriptor *Desc = getFieldDesc(); + + if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor)) { + GlobalInlineDescriptor &GD = *reinterpret_cast<GlobalInlineDescriptor *>( + asBlockPointer().Pointee->rawData()); + GD.InitState = GlobalInitState::Initialized; + return; + } + + assert(Desc); if (Desc->isPrimitiveArray()) { - if (!Pointee->IsStatic) { - // Primitive array initializer. - InitMap *&Map = getInitMap(); - if (Map == (InitMap *)-1) - return; - if (Map == nullptr) - Map = InitMap::allocate(Desc->getNumElems()); - if (Map->initialize(getIndex())) { - free(Map); - Map = (InitMap *)-1; - } + // Primitive global arrays don't have an initmap. + if (isStatic() && PointeeStorage.BS.Base == 0) + return; + + // Nothing to do for these. + if (Desc->getNumElems() == 0) + return; + + InitMapPtr &IM = getInitMap(); + if (!IM) + IM = + std::make_pair(false, std::make_shared<InitMap>(Desc->getNumElems())); + + assert(IM); + + // All initialized. 
+ if (IM->first) + return; + + if (IM->second->initializeElement(getIndex())) { + IM->first = true; + IM->second.reset(); } - } else { - // Field has its bit in an inline descriptor. - assert(Base != 0 && "Only composite fields can be initialised"); - getInlineDesc()->IsInitialized = true; + return; } + + // Field has its bit in an inline descriptor. + assert(PointeeStorage.BS.Base != 0 && + "Only composite fields can be initialised"); + getInlineDesc()->IsInitialized = true; } void Pointer::activate() const { // Field has its bit in an inline descriptor. - assert(Base != 0 && "Only composite fields can be initialised"); + assert(PointeeStorage.BS.Base != 0 && + "Only composite fields can be initialised"); + + if (isRoot() && PointeeStorage.BS.Base == sizeof(GlobalInlineDescriptor)) + return; + getInlineDesc()->IsActive = true; } @@ -185,9 +362,188 @@ void Pointer::deactivate() const { } bool Pointer::hasSameBase(const Pointer &A, const Pointer &B) { - return A.Pointee == B.Pointee; + // Two null pointers always have the same base. + if (A.isZero() && B.isZero()) + return true; + + if (A.isIntegralPointer() && B.isIntegralPointer()) + return true; + + if (A.isIntegralPointer() || B.isIntegralPointer()) + return A.getSource() == B.getSource(); + + return A.asBlockPointer().Pointee == B.asBlockPointer().Pointee; } bool Pointer::hasSameArray(const Pointer &A, const Pointer &B) { - return A.Base == B.Base && A.getFieldDesc()->IsArray; + return hasSameBase(A, B) && + A.PointeeStorage.BS.Base == B.PointeeStorage.BS.Base && + A.getFieldDesc()->IsArray; +} + +std::optional<APValue> Pointer::toRValue(const Context &Ctx, + QualType ResultType) const { + const ASTContext &ASTCtx = Ctx.getASTContext(); + assert(!ResultType.isNull()); + // Method to recursively traverse composites. + std::function<bool(QualType, const Pointer &, APValue &)> Composite; + Composite = [&Composite, &Ctx, &ASTCtx](QualType Ty, const Pointer &Ptr, + APValue &R) { + if (const auto *AT = Ty->getAs<AtomicType>()) + Ty = AT->getValueType(); + + // Invalid pointers. + if (Ptr.isDummy() || !Ptr.isLive() || !Ptr.isBlockPointer() || + Ptr.isPastEnd()) + return false; + + // Primitive values. + if (std::optional<PrimType> T = Ctx.classify(Ty)) { + TYPE_SWITCH(*T, R = Ptr.deref<T>().toAPValue(ASTCtx)); + return true; + } + + if (const auto *RT = Ty->getAs<RecordType>()) { + const auto *Record = Ptr.getRecord(); + assert(Record && "Missing record descriptor"); + + bool Ok = true; + if (RT->getDecl()->isUnion()) { + const FieldDecl *ActiveField = nullptr; + APValue Value; + for (const auto &F : Record->fields()) { + const Pointer &FP = Ptr.atField(F.Offset); + QualType FieldTy = F.Decl->getType(); + if (FP.isActive()) { + if (std::optional<PrimType> T = Ctx.classify(FieldTy)) { + TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue(ASTCtx)); + } else { + Ok &= Composite(FieldTy, FP, Value); + } + ActiveField = FP.getFieldDesc()->asFieldDecl(); + break; + } + } + R = APValue(ActiveField, Value); + } else { + unsigned NF = Record->getNumFields(); + unsigned NB = Record->getNumBases(); + unsigned NV = Ptr.isBaseClass() ? 
0 : Record->getNumVirtualBases(); + + R = APValue(APValue::UninitStruct(), NB, NF); + + for (unsigned I = 0; I < NF; ++I) { + const Record::Field *FD = Record->getField(I); + QualType FieldTy = FD->Decl->getType(); + const Pointer &FP = Ptr.atField(FD->Offset); + APValue &Value = R.getStructField(I); + + if (std::optional<PrimType> T = Ctx.classify(FieldTy)) { + TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue(ASTCtx)); + } else { + Ok &= Composite(FieldTy, FP, Value); + } + } + + for (unsigned I = 0; I < NB; ++I) { + const Record::Base *BD = Record->getBase(I); + QualType BaseTy = Ctx.getASTContext().getRecordType(BD->Decl); + const Pointer &BP = Ptr.atField(BD->Offset); + Ok &= Composite(BaseTy, BP, R.getStructBase(I)); + } + + for (unsigned I = 0; I < NV; ++I) { + const Record::Base *VD = Record->getVirtualBase(I); + QualType VirtBaseTy = Ctx.getASTContext().getRecordType(VD->Decl); + const Pointer &VP = Ptr.atField(VD->Offset); + Ok &= Composite(VirtBaseTy, VP, R.getStructBase(NB + I)); + } + } + return Ok; + } + + if (Ty->isIncompleteArrayType()) { + R = APValue(APValue::UninitArray(), 0, 0); + return true; + } + + if (const auto *AT = Ty->getAsArrayTypeUnsafe()) { + const size_t NumElems = Ptr.getNumElems(); + QualType ElemTy = AT->getElementType(); + R = APValue(APValue::UninitArray{}, NumElems, NumElems); + + bool Ok = true; + for (unsigned I = 0; I < NumElems; ++I) { + APValue &Slot = R.getArrayInitializedElt(I); + const Pointer &EP = Ptr.atIndex(I); + if (std::optional<PrimType> T = Ctx.classify(ElemTy)) { + TYPE_SWITCH(*T, Slot = EP.deref<T>().toAPValue(ASTCtx)); + } else { + Ok &= Composite(ElemTy, EP.narrow(), Slot); + } + } + return Ok; + } + + // Complex types. + if (const auto *CT = Ty->getAs<ComplexType>()) { + QualType ElemTy = CT->getElementType(); + + if (ElemTy->isIntegerType()) { + std::optional<PrimType> ElemT = Ctx.classify(ElemTy); + assert(ElemT); + INT_TYPE_SWITCH(*ElemT, { + auto V1 = Ptr.atIndex(0).deref<T>(); + auto V2 = Ptr.atIndex(1).deref<T>(); + R = APValue(V1.toAPSInt(), V2.toAPSInt()); + return true; + }); + } else if (ElemTy->isFloatingType()) { + R = APValue(Ptr.atIndex(0).deref<Floating>().getAPFloat(), + Ptr.atIndex(1).deref<Floating>().getAPFloat()); + return true; + } + return false; + } + + // Vector types. + if (const auto *VT = Ty->getAs<VectorType>()) { + assert(Ptr.getFieldDesc()->isPrimitiveArray()); + QualType ElemTy = VT->getElementType(); + PrimType ElemT = *Ctx.classify(ElemTy); + + SmallVector<APValue> Values; + Values.reserve(VT->getNumElements()); + for (unsigned I = 0; I != VT->getNumElements(); ++I) { + TYPE_SWITCH(ElemT, { + Values.push_back(Ptr.atIndex(I).deref<T>().toAPValue(ASTCtx)); + }); + } + + assert(Values.size() == VT->getNumElements()); + R = APValue(Values.data(), Values.size()); + return true; + } + + llvm_unreachable("invalid value to return"); + }; + + // Invalid to read from. + if (isDummy() || !isLive() || isPastEnd()) + return std::nullopt; + + // We can return these as rvalues, but we can't deref() them. + if (isZero() || isIntegralPointer()) + return toAPValue(ASTCtx); + + // Just load primitive types. + if (std::optional<PrimType> T = Ctx.classify(ResultType)) { + TYPE_SWITCH(*T, return this->deref<T>().toAPValue(ASTCtx)); + } + + // Return the composite type. 
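(Illustrative only.) The union branch above records which member was active when the interpreter representation is converted back to an APValue; only that member is read. At the source level (C++20 designated initializer):

union U {
  int i;
  float f;
};
constexpr U u = {.f = 1.5f};   // 'f' becomes the active member
constexpr float ok = u.f;      // reads the active member
// constexpr int bad = u.i;    // ill-formed in a constant expression:
//                             // 'i' is not the active member
static_assert(ok == 1.5f);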
+ APValue Result; + if (!Composite(getType(), *this, Result)) + return std::nullopt; + return Result; } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h index f2f6e0e76018..e351699023ba 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h @@ -19,206 +19,418 @@ #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/Expr.h" -#include "llvm/ADT/PointerUnion.h" #include "llvm/Support/raw_ostream.h" namespace clang { namespace interp { class Block; class DeadBlock; -class Context; -class InterpState; class Pointer; -class Function; +class Context; +template <unsigned A, bool B> class Integral; enum PrimType : unsigned; +class Pointer; +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P); + +struct BlockPointer { + /// The block the pointer is pointing to. + Block *Pointee; + /// Start of the current subfield. + unsigned Base; +}; + +struct IntPointer { + const Descriptor *Desc; + uint64_t Value; +}; + +enum class Storage { Block, Int }; + /// A pointer to a memory block, live or dead. /// /// This object can be allocated into interpreter stack frames. If pointing to /// a live block, it is a link in the chain of pointers pointing to the block. +/// +/// In the simplest form, a Pointer has a Block* (the pointee) and both Base +/// and Offset are 0, which means it will point to raw data. +/// +/// The Base field is used to access metadata about the data. For primitive +/// arrays, the Base is followed by an InitMap. In a variety of cases, the +/// Base is preceded by an InlineDescriptor, which is used to track the +/// initialization state, among other things. +/// +/// The Offset field is used to access the actual data. In other words, the +/// data the pointer decribes can be found at +/// Pointee->rawData() + Pointer.Offset. +/// +/// +/// Pointee Offset +/// │ │ +/// │ │ +/// ▼ ▼ +/// ┌───────┬────────────┬─────────┬────────────────────────────┐ +/// │ Block │ InlineDesc │ InitMap │ Actual Data │ +/// └───────┴────────────┴─────────┴────────────────────────────┘ +/// ▲ +/// │ +/// │ +/// Base class Pointer { private: - static constexpr unsigned PastEndMark = (unsigned)-1; - static constexpr unsigned RootPtrMark = (unsigned)-1; + static constexpr unsigned PastEndMark = ~0u; + static constexpr unsigned RootPtrMark = ~0u; public: - Pointer() {} + Pointer() { + StorageKind = Storage::Int; + PointeeStorage.Int.Value = 0; + PointeeStorage.Int.Desc = nullptr; + } Pointer(Block *B); + Pointer(Block *B, uint64_t BaseAndOffset); Pointer(const Pointer &P); Pointer(Pointer &&P); + Pointer(uint64_t Address, const Descriptor *Desc, uint64_t Offset = 0) + : Offset(Offset), StorageKind(Storage::Int) { + PointeeStorage.Int.Value = Address; + PointeeStorage.Int.Desc = Desc; + } ~Pointer(); void operator=(const Pointer &P); void operator=(Pointer &&P); + /// Equality operators are just for tests. + bool operator==(const Pointer &P) const { + if (P.StorageKind != StorageKind) + return false; + if (isIntegralPointer()) + return P.asIntPointer().Value == asIntPointer().Value && + Offset == P.Offset; + + assert(isBlockPointer()); + return P.asBlockPointer().Pointee == asBlockPointer().Pointee && + P.asBlockPointer().Base == asBlockPointer().Base && + Offset == P.Offset; + } + + bool operator!=(const Pointer &P) const { return !(P == *this); } + /// Converts the pointer to an APValue. 
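(Minimal sketch with hypothetical names; the real layout is in the Pointer.h hunk above.) The reworked Pointer is a small tagged union: either a block pointer (live interpreter storage plus a Base offset for the current subobject) or an integral pointer (a raw address plus a Descriptor describing what it would point to), with Offset shared by both forms:

#include <cstdint>

struct MyBlockPtr { void *Pointee; unsigned Base; };     // cf. BlockPointer
struct MyIntPtr   { const void *Desc; uint64_t Value; }; // cf. IntPointer

class TaggedPointer {
  enum class Kind { Block, Int } StorageKind = Kind::Int;
  union { MyBlockPtr BS; MyIntPtr Int; } Storage = {};
  uint64_t Offset = 0;

public:
  bool isBlockPointer() const { return StorageKind == Kind::Block; }
  bool isIntegralPointer() const { return StorageKind == Kind::Int; }
};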
- APValue toAPValue() const; + APValue toAPValue(const ASTContext &ASTCtx) const; + + /// Converts the pointer to a string usable in diagnostics. + std::string toDiagnosticString(const ASTContext &Ctx) const; + + uint64_t getIntegerRepresentation() const { + if (isIntegralPointer()) + return asIntPointer().Value + (Offset * elemSize()); + return reinterpret_cast<uint64_t>(asBlockPointer().Pointee) + Offset; + } + + /// Converts the pointer to an APValue that is an rvalue. + std::optional<APValue> toRValue(const Context &Ctx, + QualType ResultType) const; /// Offsets a pointer inside an array. - Pointer atIndex(unsigned Idx) const { - if (Base == RootPtrMark) - return Pointer(Pointee, RootPtrMark, getDeclDesc()->getSize()); - unsigned Off = Idx * elemSize(); + [[nodiscard]] Pointer atIndex(uint64_t Idx) const { + if (isIntegralPointer()) + return Pointer(asIntPointer().Value, asIntPointer().Desc, Idx); + + if (asBlockPointer().Base == RootPtrMark) + return Pointer(asBlockPointer().Pointee, RootPtrMark, + getDeclDesc()->getSize()); + uint64_t Off = Idx * elemSize(); if (getFieldDesc()->ElemDesc) Off += sizeof(InlineDescriptor); else - Off += sizeof(InitMap *); - return Pointer(Pointee, Base, Base + Off); + Off += sizeof(InitMapPtr); + return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, + asBlockPointer().Base + Off); } /// Creates a pointer to a field. - Pointer atField(unsigned Off) const { + [[nodiscard]] Pointer atField(unsigned Off) const { unsigned Field = Offset + Off; - return Pointer(Pointee, Field, Field); + if (isIntegralPointer()) + return Pointer(asIntPointer().Value + Field, asIntPointer().Desc); + return Pointer(asBlockPointer().Pointee, Field, Field); + } + + /// Subtract the given offset from the current Base and Offset + /// of the pointer. + [[nodiscard]] Pointer atFieldSub(unsigned Off) const { + assert(Offset >= Off); + unsigned O = Offset - Off; + return Pointer(asBlockPointer().Pointee, O, O); } /// Restricts the scope of an array element pointer. - Pointer narrow() const { + [[nodiscard]] Pointer narrow() const { + if (!isBlockPointer()) + return *this; + assert(isBlockPointer()); // Null pointers cannot be narrowed. if (isZero() || isUnknownSizeArray()) return *this; // Pointer to an array of base types - enter block. - if (Base == RootPtrMark) - return Pointer(Pointee, 0, Offset == 0 ? Offset : PastEndMark); + if (asBlockPointer().Base == RootPtrMark) + return Pointer(asBlockPointer().Pointee, sizeof(InlineDescriptor), + Offset == 0 ? Offset : PastEndMark); // Pointer is one past end - magic offset marks that. if (isOnePastEnd()) - return Pointer(Pointee, Base, PastEndMark); + return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, + PastEndMark); // Primitive arrays are a bit special since they do not have inline // descriptors. If Offset != Base, then the pointer already points to // an element and there is nothing to do. Otherwise, the pointer is // adjusted to the first element of the array. if (inPrimitiveArray()) { - if (Offset != Base) + if (Offset != asBlockPointer().Base) return *this; - return Pointer(Pointee, Base, Offset + sizeof(InitMap *)); + return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, + Offset + sizeof(InitMapPtr)); } // Pointer is to a field or array element - enter it. - if (Offset != Base) - return Pointer(Pointee, Offset, Offset); + if (Offset != asBlockPointer().Base) + return Pointer(asBlockPointer().Pointee, Offset, Offset); // Enter the first element of an array. 
if (!getFieldDesc()->isArray()) return *this; - const unsigned NewBase = Base + sizeof(InlineDescriptor); - return Pointer(Pointee, NewBase, NewBase); + const unsigned NewBase = asBlockPointer().Base + sizeof(InlineDescriptor); + return Pointer(asBlockPointer().Pointee, NewBase, NewBase); } /// Expands a pointer to the containing array, undoing narrowing. - Pointer expand() const { + [[nodiscard]] Pointer expand() const { + assert(isBlockPointer()); + Block *Pointee = asBlockPointer().Pointee; + if (isElementPastEnd()) { // Revert to an outer one-past-end pointer. unsigned Adjust; if (inPrimitiveArray()) - Adjust = sizeof(InitMap *); + Adjust = sizeof(InitMapPtr); else Adjust = sizeof(InlineDescriptor); - return Pointer(Pointee, Base, Base + getSize() + Adjust); + return Pointer(Pointee, asBlockPointer().Base, + asBlockPointer().Base + getSize() + Adjust); } // Do not step out of array elements. - if (Base != Offset) + if (asBlockPointer().Base != Offset) return *this; // If at base, point to an array of base types. - if (Base == 0) + if (isRoot()) return Pointer(Pointee, RootPtrMark, 0); // Step into the containing array, if inside one. - unsigned Next = Base - getInlineDesc()->Offset; - Descriptor *Desc = Next == 0 ? getDeclDesc() : getDescriptor(Next)->Desc; + unsigned Next = asBlockPointer().Base - getInlineDesc()->Offset; + const Descriptor *Desc = + (Next == Pointee->getDescriptor()->getMetadataSize()) + ? getDeclDesc() + : getDescriptor(Next)->Desc; if (!Desc->IsArray) return *this; return Pointer(Pointee, Next, Offset); } /// Checks if the pointer is null. - bool isZero() const { return Pointee == nullptr; } + bool isZero() const { + if (isBlockPointer()) + return asBlockPointer().Pointee == nullptr; + assert(isIntegralPointer()); + return asIntPointer().Value == 0 && Offset == 0; + } /// Checks if the pointer is live. - bool isLive() const { return Pointee && !Pointee->IsDead; } + bool isLive() const { + if (isIntegralPointer()) + return true; + return asBlockPointer().Pointee && !asBlockPointer().Pointee->IsDead; + } /// Checks if the item is a field in an object. - bool isField() const { return Base != 0 && Base != RootPtrMark; } + bool isField() const { + if (isIntegralPointer()) + return false; + + return !isRoot() && getFieldDesc()->asDecl(); + } /// Accessor for information about the declaration site. - Descriptor *getDeclDesc() const { return Pointee->Desc; } + const Descriptor *getDeclDesc() const { + if (isIntegralPointer()) + return asIntPointer().Desc; + + assert(isBlockPointer()); + assert(asBlockPointer().Pointee); + return asBlockPointer().Pointee->Desc; + } SourceLocation getDeclLoc() const { return getDeclDesc()->getLocation(); } + /// Returns the expression or declaration the pointer has been created for. + DeclTy getSource() const { + if (isBlockPointer()) + return getDeclDesc()->getSource(); + + assert(isIntegralPointer()); + return asIntPointer().Desc ? asIntPointer().Desc->getSource() : DeclTy(); + } + /// Returns a pointer to the object of which this pointer is a field. 
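(Illustrative only.) narrow() and expand() exist so that a pointer formed for one array element cannot silently walk into a sibling element even though the underlying storage is contiguous; the user-visible effect in constant expressions:

constexpr int grid[2][3] = {{1, 2, 3}, {4, 5, 6}};
constexpr int ok = grid[0][2];      // last element of the first row
// constexpr int bad = grid[0][3];  // ill-formed: one past the end of grid[0],
//                                  // not grid[1][0], because the element
//                                  // pointer is narrowed to a single row
static_assert(ok == 3);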
- Pointer getBase() const { - if (Base == RootPtrMark) { + [[nodiscard]] Pointer getBase() const { + if (asBlockPointer().Base == RootPtrMark) { assert(Offset == PastEndMark && "cannot get base of a block"); - return Pointer(Pointee, Base, 0); + return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, 0); } - assert(Offset == Base && "not an inner field"); - unsigned NewBase = Base - getInlineDesc()->Offset; - return Pointer(Pointee, NewBase, NewBase); + unsigned NewBase = asBlockPointer().Base - getInlineDesc()->Offset; + return Pointer(asBlockPointer().Pointee, NewBase, NewBase); } /// Returns the parent array. - Pointer getArray() const { - if (Base == RootPtrMark) { + [[nodiscard]] Pointer getArray() const { + if (asBlockPointer().Base == RootPtrMark) { assert(Offset != 0 && Offset != PastEndMark && "not an array element"); - return Pointer(Pointee, Base, 0); + return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, 0); } - assert(Offset != Base && "not an array element"); - return Pointer(Pointee, Base, Base); + assert(Offset != asBlockPointer().Base && "not an array element"); + return Pointer(asBlockPointer().Pointee, asBlockPointer().Base, + asBlockPointer().Base); } /// Accessors for information about the innermost field. - Descriptor *getFieldDesc() const { - if (Base == 0 || Base == RootPtrMark) + const Descriptor *getFieldDesc() const { + if (isIntegralPointer()) + return asIntPointer().Desc; + + if (isRoot()) return getDeclDesc(); return getInlineDesc()->Desc; } /// Returns the type of the innermost field. - QualType getType() const { return getFieldDesc()->getType(); } + QualType getType() const { + if (inPrimitiveArray() && Offset != asBlockPointer().Base) { + // Unfortunately, complex and vector types are not array types in clang, + // but they are for us. + if (const auto *AT = getFieldDesc()->getType()->getAsArrayTypeUnsafe()) + return AT->getElementType(); + if (const auto *CT = getFieldDesc()->getType()->getAs<ComplexType>()) + return CT->getElementType(); + if (const auto *CT = getFieldDesc()->getType()->getAs<VectorType>()) + return CT->getElementType(); + } + return getFieldDesc()->getType(); + } + + [[nodiscard]] Pointer getDeclPtr() const { + return Pointer(asBlockPointer().Pointee); + } /// Returns the element size of the innermost field. size_t elemSize() const { - if (Base == RootPtrMark) + if (isIntegralPointer()) { + if (!asIntPointer().Desc) + return 1; + return asIntPointer().Desc->getElemSize(); + } + + if (asBlockPointer().Base == RootPtrMark) return getDeclDesc()->getSize(); return getFieldDesc()->getElemSize(); } /// Returns the total size of the innermost field. - size_t getSize() const { return getFieldDesc()->getSize(); } + size_t getSize() const { + assert(isBlockPointer()); + return getFieldDesc()->getSize(); + } /// Returns the offset into an array. unsigned getOffset() const { assert(Offset != PastEndMark && "invalid offset"); - if (Base == RootPtrMark) + if (asBlockPointer().Base == RootPtrMark) return Offset; unsigned Adjust = 0; - if (Offset != Base) { + if (Offset != asBlockPointer().Base) { if (getFieldDesc()->ElemDesc) Adjust = sizeof(InlineDescriptor); else - Adjust = sizeof(InitMap *); + Adjust = sizeof(InitMapPtr); } - return Offset - Base - Adjust; + return Offset - asBlockPointer().Base - Adjust; + } + + /// Whether this array refers to an array, but not + /// to the first element. + bool isArrayRoot() const { + return inArray() && Offset == asBlockPointer().Base; } /// Checks if the innermost field is an array. 
- bool inArray() const { return getFieldDesc()->IsArray; } + bool inArray() const { + if (isBlockPointer()) + return getFieldDesc()->IsArray; + return false; + } /// Checks if the structure is a primitive array. - bool inPrimitiveArray() const { return getFieldDesc()->isPrimitiveArray(); } + bool inPrimitiveArray() const { + if (isBlockPointer()) + return getFieldDesc()->isPrimitiveArray(); + return false; + } /// Checks if the structure is an array of unknown size. bool isUnknownSizeArray() const { + if (!isBlockPointer()) + return false; return getFieldDesc()->isUnknownSizeArray(); } /// Checks if the pointer points to an array. - bool isArrayElement() const { return Base != Offset; } + bool isArrayElement() const { + if (isBlockPointer()) + return inArray() && asBlockPointer().Base != Offset; + return false; + } /// Pointer points directly to a block. bool isRoot() const { - return (Base == 0 || Base == RootPtrMark) && Offset == 0; + if (isZero() || isIntegralPointer()) + return true; + return (asBlockPointer().Base == + asBlockPointer().Pointee->getDescriptor()->getMetadataSize() || + asBlockPointer().Base == 0); + } + /// If this pointer has an InlineDescriptor we can use to initialize. + bool canBeInitialized() const { + if (!isBlockPointer()) + return false; + + return asBlockPointer().Pointee && asBlockPointer().Base > 0; } + [[nodiscard]] const BlockPointer &asBlockPointer() const { + assert(isBlockPointer()); + return PointeeStorage.BS; + } + [[nodiscard]] const IntPointer &asIntPointer() const { + assert(isIntegralPointer()); + return PointeeStorage.Int; + } + bool isBlockPointer() const { return StorageKind == Storage::Block; } + bool isIntegralPointer() const { return StorageKind == Storage::Int; } + /// Returns the record descriptor of a class. - Record *getRecord() const { return getFieldDesc()->ElemRecord; } + const Record *getRecord() const { return getFieldDesc()->ElemRecord; } + /// Returns the element record type, if this is a non-primive array. + const Record *getElemRecord() const { + const Descriptor *ElemDesc = getFieldDesc()->ElemDesc; + return ElemDesc ? ElemDesc->ElemRecord : nullptr; + } /// Returns the field information. const FieldDecl *getField() const { return getFieldDesc()->asFieldDecl(); } @@ -226,43 +438,116 @@ public: bool isUnion() const; /// Checks if the storage is extern. - bool isExtern() const { return Pointee->isExtern(); } + bool isExtern() const { + if (isBlockPointer()) + return asBlockPointer().Pointee && asBlockPointer().Pointee->isExtern(); + return false; + } /// Checks if the storage is static. - bool isStatic() const { return Pointee->isStatic(); } + bool isStatic() const { + if (isIntegralPointer()) + return true; + assert(asBlockPointer().Pointee); + return asBlockPointer().Pointee->isStatic(); + } /// Checks if the storage is temporary. - bool isTemporary() const { return Pointee->isTemporary(); } + bool isTemporary() const { + if (isBlockPointer()) { + assert(asBlockPointer().Pointee); + return asBlockPointer().Pointee->isTemporary(); + } + return false; + } /// Checks if the storage is a static temporary. bool isStaticTemporary() const { return isStatic() && isTemporary(); } /// Checks if the field is mutable. 
- bool isMutable() const { return Base != 0 && getInlineDesc()->IsMutable; } + bool isMutable() const { + if (!isBlockPointer()) + return false; + return !isRoot() && getInlineDesc()->IsFieldMutable; + } + + bool isWeak() const { + if (isIntegralPointer()) + return false; + + assert(isBlockPointer()); + if (const ValueDecl *VD = getDeclDesc()->asValueDecl()) + return VD->isWeak(); + return false; + } /// Checks if an object was initialized. bool isInitialized() const; /// Checks if the object is active. - bool isActive() const { return Base == 0 || getInlineDesc()->IsActive; } + bool isActive() const { + if (!isBlockPointer()) + return true; + return isRoot() || getInlineDesc()->IsActive; + } /// Checks if a structure is a base class. bool isBaseClass() const { return isField() && getInlineDesc()->IsBase; } + bool isVirtualBaseClass() const { + return isField() && getInlineDesc()->IsVirtualBase; + } + /// Checks if the pointer points to a dummy value. + bool isDummy() const { + if (!isBlockPointer()) + return false; + + if (!asBlockPointer().Pointee) + return false; + + return getDeclDesc()->isDummy(); + } /// Checks if an object or a subfield is mutable. bool isConst() const { - return Base == 0 ? getDeclDesc()->IsConst : getInlineDesc()->IsConst; + if (isIntegralPointer()) + return true; + return isRoot() ? getDeclDesc()->IsConst : getInlineDesc()->IsConst; } /// Returns the declaration ID. - llvm::Optional<unsigned> getDeclID() const { return Pointee->getDeclID(); } + std::optional<unsigned> getDeclID() const { + if (isBlockPointer()) { + assert(asBlockPointer().Pointee); + return asBlockPointer().Pointee->getDeclID(); + } + return std::nullopt; + } /// Returns the byte offset from the start. unsigned getByteOffset() const { + if (isIntegralPointer()) + return asIntPointer().Value + Offset; + if (isOnePastEnd()) + return PastEndMark; return Offset; } /// Returns the number of elements. - unsigned getNumElems() const { return getSize() / elemSize(); } + unsigned getNumElems() const { + if (isIntegralPointer()) + return ~unsigned(0); + return getSize() / elemSize(); + } + + const Block *block() const { return asBlockPointer().Pointee; } /// Returns the index into an array. int64_t getIndex() const { - if (isElementPastEnd()) - return 1; + if (!isBlockPointer()) + return 0; + + if (isZero()) + return 0; + + // narrow()ed element in a composite array. + if (asBlockPointer().Base > sizeof(InlineDescriptor) && + asBlockPointer().Base == Offset) + return 0; + if (auto ElemSize = elemSize()) return getOffset() / ElemSize; return 0; @@ -270,21 +555,67 @@ public: /// Checks if the index is one past end. bool isOnePastEnd() const { - return isElementPastEnd() || getSize() == getOffset(); + if (isIntegralPointer()) + return false; + + if (!asBlockPointer().Pointee) + return false; + + if (isUnknownSizeArray()) + return false; + + return isElementPastEnd() || isPastEnd() || + (getSize() == getOffset() && !isZeroSizeArray()); + } + + /// Checks if the pointer points past the end of the object. + bool isPastEnd() const { + if (isIntegralPointer()) + return false; + + return !isZero() && Offset > PointeeStorage.BS.Pointee->getSize(); } /// Checks if the pointer is an out-of-bounds element pointer. bool isElementPastEnd() const { return Offset == PastEndMark; } + /// Checks if the pointer is pointing to a zero-size array. + bool isZeroSizeArray() const { return getFieldDesc()->isZeroSizeArray(); } + /// Dereferences the pointer, if it's live. 
template <typename T> T &deref() const { assert(isLive() && "Invalid pointer"); - return *reinterpret_cast<T *>(Pointee->data() + Offset); + assert(isBlockPointer()); + assert(asBlockPointer().Pointee); + assert(isDereferencable()); + assert(Offset + sizeof(T) <= + asBlockPointer().Pointee->getDescriptor()->getAllocSize()); + + if (isArrayRoot()) + return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + + asBlockPointer().Base + sizeof(InitMapPtr)); + + return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + Offset); } /// Dereferences a primitive element. template <typename T> T &elem(unsigned I) const { - return reinterpret_cast<T *>(Pointee->data())[I]; + assert(I < getNumElems()); + assert(isBlockPointer()); + assert(asBlockPointer().Pointee); + return reinterpret_cast<T *>(asBlockPointer().Pointee->data() + + sizeof(InitMapPtr))[I]; + } + + /// Whether this block can be read from at all. This is only true for + /// block pointers that point to a valid location inside that block. + bool isDereferencable() const { + if (!isBlockPointer()) + return false; + if (isPastEnd()) + return false; + + return true; } /// Initializes a field. @@ -294,52 +625,75 @@ public: /// Deactivates an entire strurcutre. void deactivate() const; + /// Compare two pointers. + ComparisonCategoryResult compare(const Pointer &Other) const { + if (!hasSameBase(*this, Other)) + return ComparisonCategoryResult::Unordered; + + if (Offset < Other.Offset) + return ComparisonCategoryResult::Less; + else if (Offset > Other.Offset) + return ComparisonCategoryResult::Greater; + + return ComparisonCategoryResult::Equal; + } + /// Checks if two pointers are comparable. static bool hasSameBase(const Pointer &A, const Pointer &B); /// Checks if two pointers can be subtracted. static bool hasSameArray(const Pointer &A, const Pointer &B); /// Prints the pointer. - void print(llvm::raw_ostream &OS) const { - OS << "{" << Base << ", " << Offset << ", "; - if (Pointee) - OS << Pointee->getSize(); - else - OS << "nullptr"; - OS << "}"; - } + void print(llvm::raw_ostream &OS) const; private: friend class Block; friend class DeadBlock; + friend class MemberPointer; + friend class InterpState; + friend struct InitMap; + friend class DynamicAllocator; - Pointer(Block *Pointee, unsigned Base, unsigned Offset); + Pointer(Block *Pointee, unsigned Base, uint64_t Offset); /// Returns the embedded descriptor preceding a field. - InlineDescriptor *getInlineDesc() const { return getDescriptor(Base); } + InlineDescriptor *getInlineDesc() const { + assert(asBlockPointer().Base != sizeof(GlobalInlineDescriptor)); + assert(asBlockPointer().Base <= asBlockPointer().Pointee->getSize()); + return getDescriptor(asBlockPointer().Base); + } /// Returns a descriptor at a given offset. InlineDescriptor *getDescriptor(unsigned Offset) const { assert(Offset != 0 && "Not a nested pointer"); - return reinterpret_cast<InlineDescriptor *>(Pointee->data() + Offset) - 1; + assert(isBlockPointer()); + assert(!isZero()); + return reinterpret_cast<InlineDescriptor *>( + asBlockPointer().Pointee->rawData() + Offset) - + 1; } - /// Returns a reference to the pointer which stores the initialization map. - InitMap *&getInitMap() const { - return *reinterpret_cast<InitMap **>(Pointee->data() + Base); + /// Returns a reference to the InitMapPtr which stores the initialization map. 
+ InitMapPtr &getInitMap() const { + assert(isBlockPointer()); + assert(!isZero()); + return *reinterpret_cast<InitMapPtr *>(asBlockPointer().Pointee->rawData() + + asBlockPointer().Base); } - /// The block the pointer is pointing to. - Block *Pointee = nullptr; - /// Start of the current subfield. - unsigned Base = 0; - /// Offset into the block. - unsigned Offset = 0; + /// Offset into the storage. + uint64_t Offset = 0; /// Previous link in the pointer chain. Pointer *Prev = nullptr; /// Next link in the pointer chain. Pointer *Next = nullptr; + + union { + BlockPointer BS; + IntPointer Int; + } PointeeStorage; + Storage StorageKind = Storage::Int; }; inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P) { diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp index 082bfaf3c207..3054e67d5c49 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp @@ -1,4 +1,4 @@ -//===--- Type.cpp - Types for the constexpr VM ------------------*- C++ -*-===// +//===--- PrimType.cpp - Types for the constexpr VM --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -7,6 +7,12 @@ //===----------------------------------------------------------------------===// #include "PrimType.h" +#include "Boolean.h" +#include "Floating.h" +#include "FunctionPointer.h" +#include "IntegralAP.h" +#include "MemberPointer.h" +#include "Pointer.h" using namespace clang; using namespace clang::interp; diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h index f5f4f8e5c32d..20fb5e81774d 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h @@ -1,4 +1,4 @@ -//===--- PrimType.h - Types for the constexpr VM --------------------*- C++ -*-===// +//===--- PrimType.h - Types for the constexpr VM ----------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. @@ -13,30 +13,64 @@ #ifndef LLVM_CLANG_AST_INTERP_TYPE_H #define LLVM_CLANG_AST_INTERP_TYPE_H +#include "llvm/Support/raw_ostream.h" #include <climits> #include <cstddef> #include <cstdint> -#include "Boolean.h" -#include "Integral.h" -#include "Pointer.h" namespace clang { namespace interp { +class Pointer; +class Boolean; +class Floating; +class FunctionPointer; +class MemberPointer; +template <bool Signed> class IntegralAP; +template <unsigned Bits, bool Signed> class Integral; + /// Enumeration of the primitive types of the VM. 
enum PrimType : unsigned { - PT_Sint8, - PT_Uint8, - PT_Sint16, - PT_Uint16, - PT_Sint32, - PT_Uint32, - PT_Sint64, - PT_Uint64, - PT_Bool, - PT_Ptr, + PT_Sint8 = 0, + PT_Uint8 = 1, + PT_Sint16 = 2, + PT_Uint16 = 3, + PT_Sint32 = 4, + PT_Uint32 = 5, + PT_Sint64 = 6, + PT_Uint64 = 7, + PT_IntAP = 8, + PT_IntAPS = 9, + PT_Bool = 10, + PT_Float = 11, + PT_Ptr = 12, + PT_FnPtr = 13, + PT_MemberPtr = 14, }; +inline constexpr bool isPtrType(PrimType T) { + return T == PT_Ptr || T == PT_FnPtr || T == PT_MemberPtr; +} + +enum class CastKind : uint8_t { + Reinterpret, + Atomic, +}; +inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, + interp::CastKind CK) { + switch (CK) { + case interp::CastKind::Reinterpret: + OS << "reinterpret_cast"; + break; + case interp::CastKind::Atomic: + OS << "atomic"; + break; + } + return OS; +} + +constexpr bool isIntegralType(PrimType T) { return T <= PT_Bool; } + /// Mapping from primitive types to their representation. template <PrimType T> struct PrimConv; template <> struct PrimConv<PT_Sint8> { using T = Integral<8, true>; }; @@ -47,8 +81,21 @@ template <> struct PrimConv<PT_Sint32> { using T = Integral<32, true>; }; template <> struct PrimConv<PT_Uint32> { using T = Integral<32, false>; }; template <> struct PrimConv<PT_Sint64> { using T = Integral<64, true>; }; template <> struct PrimConv<PT_Uint64> { using T = Integral<64, false>; }; +template <> struct PrimConv<PT_IntAP> { + using T = IntegralAP<false>; +}; +template <> struct PrimConv<PT_IntAPS> { + using T = IntegralAP<true>; +}; +template <> struct PrimConv<PT_Float> { using T = Floating; }; template <> struct PrimConv<PT_Bool> { using T = Boolean; }; template <> struct PrimConv<PT_Ptr> { using T = Pointer; }; +template <> struct PrimConv<PT_FnPtr> { + using T = FunctionPointer; +}; +template <> struct PrimConv<PT_MemberPtr> { + using T = MemberPointer; +}; /// Returns the size of a primitive type in bytes. size_t primSize(PrimType Type); @@ -58,21 +105,11 @@ constexpr size_t align(size_t Size) { return ((Size + alignof(void *) - 1) / alignof(void *)) * alignof(void *); } -inline bool isPrimitiveIntegral(PrimType Type) { - switch (Type) { - case PT_Bool: - case PT_Sint8: - case PT_Uint8: - case PT_Sint16: - case PT_Uint16: - case PT_Sint32: - case PT_Uint32: - case PT_Sint64: - case PT_Uint64: - return true; - default: - return false; - } +constexpr bool aligned(uintptr_t Value) { return Value == align(Value); } +static_assert(aligned(sizeof(void *))); + +static inline bool aligned(const void *P) { + return aligned(reinterpret_cast<uintptr_t>(P)); } } // namespace interp @@ -81,35 +118,70 @@ inline bool isPrimitiveIntegral(PrimType Type) { /// Helper macro to simplify type switches. /// The macro implicitly exposes a type T in the scope of the inner block. 
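As a rough illustration of how these type-switch macros get used (not taken from this diff; the helper name is made up), a size query over the runtime PrimType can dispatch to the matching compile-time representation type. Note the sketch assumes the full representation headers (Pointer.h, Floating.h, and so on) are visible, which is exactly why PrimType.cpp gains those includes above:

    // Illustrative sketch only: maps a runtime PrimType to its C++
    // representation type T and returns that type's size.
    size_t primSizeSketch(clang::interp::PrimType Ty) {
      TYPE_SWITCH(Ty, return sizeof(T));
      llvm_unreachable("not a primitive type");
    }

The reworked macro definitions themselves follow.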
#define TYPE_SWITCH_CASE(Name, B) \ - case Name: { using T = PrimConv<Name>::T; do {B;} while(0); break; } + case Name: { using T = PrimConv<Name>::T; B; break; } #define TYPE_SWITCH(Expr, B) \ - switch (Expr) { \ - TYPE_SWITCH_CASE(PT_Sint8, B) \ - TYPE_SWITCH_CASE(PT_Uint8, B) \ - TYPE_SWITCH_CASE(PT_Sint16, B) \ - TYPE_SWITCH_CASE(PT_Uint16, B) \ - TYPE_SWITCH_CASE(PT_Sint32, B) \ - TYPE_SWITCH_CASE(PT_Uint32, B) \ - TYPE_SWITCH_CASE(PT_Sint64, B) \ - TYPE_SWITCH_CASE(PT_Uint64, B) \ - TYPE_SWITCH_CASE(PT_Bool, B) \ - TYPE_SWITCH_CASE(PT_Ptr, B) \ - } -#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \ - switch (Expr) { \ - TYPE_SWITCH_CASE(PT_Ptr, B) \ - default: do { D; } while(0); break; \ - } + do { \ + switch (Expr) { \ + TYPE_SWITCH_CASE(PT_Sint8, B) \ + TYPE_SWITCH_CASE(PT_Uint8, B) \ + TYPE_SWITCH_CASE(PT_Sint16, B) \ + TYPE_SWITCH_CASE(PT_Uint16, B) \ + TYPE_SWITCH_CASE(PT_Sint32, B) \ + TYPE_SWITCH_CASE(PT_Uint32, B) \ + TYPE_SWITCH_CASE(PT_Sint64, B) \ + TYPE_SWITCH_CASE(PT_Uint64, B) \ + TYPE_SWITCH_CASE(PT_IntAP, B) \ + TYPE_SWITCH_CASE(PT_IntAPS, B) \ + TYPE_SWITCH_CASE(PT_Float, B) \ + TYPE_SWITCH_CASE(PT_Bool, B) \ + TYPE_SWITCH_CASE(PT_Ptr, B) \ + TYPE_SWITCH_CASE(PT_FnPtr, B) \ + TYPE_SWITCH_CASE(PT_MemberPtr, B) \ + } \ + } while (0) + #define INT_TYPE_SWITCH(Expr, B) \ - switch (Expr) { \ - TYPE_SWITCH_CASE(PT_Sint8, B) \ - TYPE_SWITCH_CASE(PT_Uint8, B) \ - TYPE_SWITCH_CASE(PT_Sint16, B) \ - TYPE_SWITCH_CASE(PT_Uint16, B) \ - TYPE_SWITCH_CASE(PT_Sint32, B) \ - TYPE_SWITCH_CASE(PT_Uint32, B) \ - TYPE_SWITCH_CASE(PT_Sint64, B) \ - TYPE_SWITCH_CASE(PT_Uint64, B) \ - default: llvm_unreachable("not an integer"); \ - } + do { \ + switch (Expr) { \ + TYPE_SWITCH_CASE(PT_Sint8, B) \ + TYPE_SWITCH_CASE(PT_Uint8, B) \ + TYPE_SWITCH_CASE(PT_Sint16, B) \ + TYPE_SWITCH_CASE(PT_Uint16, B) \ + TYPE_SWITCH_CASE(PT_Sint32, B) \ + TYPE_SWITCH_CASE(PT_Uint32, B) \ + TYPE_SWITCH_CASE(PT_Sint64, B) \ + TYPE_SWITCH_CASE(PT_Uint64, B) \ + TYPE_SWITCH_CASE(PT_IntAP, B) \ + TYPE_SWITCH_CASE(PT_IntAPS, B) \ + TYPE_SWITCH_CASE(PT_Bool, B) \ + default: \ + llvm_unreachable("Not an integer value"); \ + } \ + } while (0) + +#define INT_TYPE_SWITCH_NO_BOOL(Expr, B) \ + do { \ + switch (Expr) { \ + TYPE_SWITCH_CASE(PT_Sint8, B) \ + TYPE_SWITCH_CASE(PT_Uint8, B) \ + TYPE_SWITCH_CASE(PT_Sint16, B) \ + TYPE_SWITCH_CASE(PT_Uint16, B) \ + TYPE_SWITCH_CASE(PT_Sint32, B) \ + TYPE_SWITCH_CASE(PT_Uint32, B) \ + TYPE_SWITCH_CASE(PT_Sint64, B) \ + TYPE_SWITCH_CASE(PT_Uint64, B) \ + TYPE_SWITCH_CASE(PT_IntAP, B) \ + TYPE_SWITCH_CASE(PT_IntAPS, B) \ + default: \ + llvm_unreachable("Not an integer value"); \ + } \ + } while (0) + +#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \ + do { \ + switch (Expr) { \ + TYPE_SWITCH_CASE(PT_Ptr, B) \ + default: { D; break; } \ + } \ + } while (0) #endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h b/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h new file mode 100644 index 000000000000..e935dbfd3691 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h @@ -0,0 +1,36 @@ +//===------ Primitives.h - Types for the constexpr VM -----------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Utilities and helper functions for all primitive types: +// - Integral +// - Floating +// - Boolean +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_AST_INTERP_PRIMITIVES_H +#define LLVM_CLANG_AST_INTERP_PRIMITIVES_H + +#include "clang/AST/ComparisonCategories.h" + +namespace clang { +namespace interp { + +/// Helper to compare two comparable types. +template <typename T> ComparisonCategoryResult Compare(const T &X, const T &Y) { + if (X < Y) + return ComparisonCategoryResult::Less; + if (X > Y) + return ComparisonCategoryResult::Greater; + return ComparisonCategoryResult::Equal; +} + +} // namespace interp +} // namespace clang + +#endif diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp index fcbab0ea8172..5dd59d969853 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp @@ -7,9 +7,9 @@ //===----------------------------------------------------------------------===// #include "Program.h" -#include "ByteCodeStmtGen.h" #include "Context.h" #include "Function.h" +#include "Integral.h" #include "Opcode.h" #include "PrimType.h" #include "clang/AST/Decl.h" @@ -18,6 +18,21 @@ using namespace clang; using namespace clang::interp; +unsigned Program::getOrCreateNativePointer(const void *Ptr) { + auto It = NativePointerIndices.find(Ptr); + if (It != NativePointerIndices.end()) + return It->second; + + unsigned Idx = NativePointers.size(); + NativePointers.push_back(Ptr); + NativePointerIndices[Ptr] = Idx; + return Idx; +} + +const void *Program::getNativePointer(unsigned Idx) { + return NativePointers[Idx]; +} + unsigned Program::createGlobalString(const StringLiteral *S) { const size_t CharWidth = S->getCharByteWidth(); const size_t BitWidth = CharWidth * Ctx.getCharBit(); @@ -38,17 +53,21 @@ unsigned Program::createGlobalString(const StringLiteral *S) { } // Create a descriptor for the string. - Descriptor *Desc = allocateDescriptor(S, CharType, S->getLength() + 1, - /*isConst=*/true, - /*isTemporary=*/false, - /*isMutable=*/false); + Descriptor *Desc = + allocateDescriptor(S, CharType, Descriptor::GlobalMD, S->getLength() + 1, + /*isConst=*/true, + /*isTemporary=*/false, + /*isMutable=*/false); // Allocate storage for the string. // The byte length does not include the null terminator. unsigned I = Globals.size(); unsigned Sz = Desc->getAllocSize(); - auto *G = new (Allocator, Sz) Global(Desc, /*isStatic=*/true, + auto *G = new (Allocator, Sz) Global(Ctx.getEvalID(), Desc, /*isStatic=*/true, /*isExtern=*/false); + G->block()->invokeCtor(); + + new (G->block()->rawData()) InlineDescriptor(Desc); Globals.push_back(G); // Construct the string in storage. 
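The Compare() helper added in Primitives.h above only assumes operator< and operator>, so any ordered value type can reuse it when evaluating comparisons; a quick hypothetical use (values chosen purely for illustration):

    using clang::ComparisonCategoryResult;
    assert(clang::interp::Compare(3, 5) == ComparisonCategoryResult::Less);
    assert(clang::interp::Compare(7, 7) == ComparisonCategoryResult::Equal);

The createGlobalString() hunk below then writes each code unit into the freshly allocated block.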
@@ -60,16 +79,19 @@ unsigned Program::createGlobalString(const StringLiteral *S) { case PT_Sint8: { using T = PrimConv<PT_Sint8>::T; Field.deref<T>() = T::from(CodePoint, BitWidth); + Field.initialize(); break; } case PT_Uint16: { using T = PrimConv<PT_Uint16>::T; Field.deref<T>() = T::from(CodePoint, BitWidth); + Field.initialize(); break; } case PT_Uint32: { using T = PrimConv<PT_Uint32>::T; Field.deref<T>() = T::from(CodePoint, BitWidth); + Field.initialize(); break; } default: @@ -79,153 +101,185 @@ unsigned Program::createGlobalString(const StringLiteral *S) { return I; } -Pointer Program::getPtrGlobal(unsigned Idx) { +Pointer Program::getPtrGlobal(unsigned Idx) const { assert(Idx < Globals.size()); return Pointer(Globals[Idx]->block()); } -llvm::Optional<unsigned> Program::getGlobal(const ValueDecl *VD) { - auto It = GlobalIndices.find(VD); - if (It != GlobalIndices.end()) +std::optional<unsigned> Program::getGlobal(const ValueDecl *VD) { + if (auto It = GlobalIndices.find(VD); It != GlobalIndices.end()) return It->second; - // Find any previous declarations which were aleady evaluated. - llvm::Optional<unsigned> Index; - for (const Decl *P = VD; P; P = P->getPreviousDecl()) { - auto It = GlobalIndices.find(P); - if (It != GlobalIndices.end()) { + // Find any previous declarations which were already evaluated. + std::optional<unsigned> Index; + for (const Decl *P = VD->getPreviousDecl(); P; P = P->getPreviousDecl()) { + if (auto It = GlobalIndices.find(P); It != GlobalIndices.end()) { Index = It->second; break; } } // Map the decl to the existing index. - if (Index) { + if (Index) GlobalIndices[VD] = *Index; - return {}; - } - return Index; + return std::nullopt; } -llvm::Optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD) { +std::optional<unsigned> Program::getGlobal(const Expr *E) { + if (auto It = GlobalIndices.find(E); It != GlobalIndices.end()) + return It->second; + return std::nullopt; +} + +std::optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD, + const Expr *Init) { if (auto Idx = getGlobal(VD)) return Idx; - if (auto Idx = createGlobal(VD)) { + if (auto Idx = createGlobal(VD, Init)) { GlobalIndices[VD] = *Idx; return Idx; } - return {}; + return std::nullopt; } -llvm::Optional<unsigned> Program::getOrCreateDummy(const ParmVarDecl *PD) { - auto &ASTCtx = Ctx.getASTContext(); - - // Create a pointer to an incomplete array of the specified elements. - QualType ElemTy = PD->getType()->castAs<PointerType>()->getPointeeType(); - QualType Ty = ASTCtx.getIncompleteArrayType(ElemTy, ArrayType::Normal, 0); - +std::optional<unsigned> Program::getOrCreateDummy(const ValueDecl *VD) { // Dedup blocks since they are immutable and pointers cannot be compared. - auto It = DummyParams.find(PD); - if (It != DummyParams.end()) + if (auto It = DummyVariables.find(VD); It != DummyVariables.end()) return It->second; - if (auto Idx = createGlobal(PD, Ty, /*isStatic=*/true, /*isExtern=*/true)) { - DummyParams[PD] = *Idx; - return Idx; - } - return {}; + QualType QT = VD->getType(); + if (const auto *RT = QT->getAs<ReferenceType>()) + QT = RT->getPointeeType(); + + Descriptor *Desc; + if (std::optional<PrimType> T = Ctx.classify(QT)) + Desc = createDescriptor(VD, *T, std::nullopt, true, false); + else + Desc = createDescriptor(VD, QT.getTypePtr(), std::nullopt, true, false); + if (!Desc) + Desc = allocateDescriptor(VD); + + assert(Desc); + Desc->makeDummy(); + + assert(Desc->isDummy()); + + // Allocate a block for storage. 
+ unsigned I = Globals.size(); + + auto *G = new (Allocator, Desc->getAllocSize()) + Global(Ctx.getEvalID(), getCurrentDecl(), Desc, /*IsStatic=*/true, + /*IsExtern=*/false); + G->block()->invokeCtor(); + + Globals.push_back(G); + DummyVariables[VD] = I; + return I; } -llvm::Optional<unsigned> Program::createGlobal(const ValueDecl *VD) { +std::optional<unsigned> Program::createGlobal(const ValueDecl *VD, + const Expr *Init) { bool IsStatic, IsExtern; - if (auto *Var = dyn_cast<VarDecl>(VD)) { - IsStatic = !Var->hasLocalStorage(); - IsExtern = !Var->getAnyInitializer(); + if (const auto *Var = dyn_cast<VarDecl>(VD)) { + IsStatic = Context::shouldBeGloballyIndexed(VD); + IsExtern = Var->hasExternalStorage(); + } else if (isa<UnnamedGlobalConstantDecl, MSGuidDecl, + TemplateParamObjectDecl>(VD)) { + IsStatic = true; + IsExtern = false; } else { IsStatic = false; IsExtern = true; } - if (auto Idx = createGlobal(VD, VD->getType(), IsStatic, IsExtern)) { + if (auto Idx = createGlobal(VD, VD->getType(), IsStatic, IsExtern, Init)) { for (const Decl *P = VD; P; P = P->getPreviousDecl()) GlobalIndices[P] = *Idx; return *Idx; } - return {}; + return std::nullopt; } -llvm::Optional<unsigned> Program::createGlobal(const Expr *E) { - return createGlobal(E, E->getType(), /*isStatic=*/true, /*isExtern=*/false); +std::optional<unsigned> Program::createGlobal(const Expr *E) { + if (auto Idx = getGlobal(E)) + return Idx; + if (auto Idx = createGlobal(E, E->getType(), /*isStatic=*/true, + /*isExtern=*/false)) { + GlobalIndices[E] = *Idx; + return *Idx; + } + return std::nullopt; } -llvm::Optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty, - bool IsStatic, bool IsExtern) { +std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty, + bool IsStatic, bool IsExtern, + const Expr *Init) { // Create a descriptor for the global. Descriptor *Desc; const bool IsConst = Ty.isConstQualified(); const bool IsTemporary = D.dyn_cast<const Expr *>(); - if (auto T = Ctx.classify(Ty)) { - Desc = createDescriptor(D, *T, IsConst, IsTemporary); - } else { - Desc = createDescriptor(D, Ty.getTypePtr(), IsConst, IsTemporary); - } + if (std::optional<PrimType> T = Ctx.classify(Ty)) + Desc = createDescriptor(D, *T, Descriptor::GlobalMD, IsConst, IsTemporary); + else + Desc = createDescriptor(D, Ty.getTypePtr(), Descriptor::GlobalMD, IsConst, + IsTemporary); + if (!Desc) - return {}; + return std::nullopt; // Allocate a block for storage. unsigned I = Globals.size(); auto *G = new (Allocator, Desc->getAllocSize()) - Global(getCurrentDecl(), Desc, IsStatic, IsExtern); + Global(Ctx.getEvalID(), getCurrentDecl(), Desc, IsStatic, IsExtern); G->block()->invokeCtor(); + // Initialize InlineDescriptor fields. + auto *GD = new (G->block()->rawData()) GlobalInlineDescriptor(); + if (!Init) + GD->InitState = GlobalInitState::NoInitializer; Globals.push_back(G); return I; } Function *Program::getFunction(const FunctionDecl *F) { - F = F->getDefinition(); + F = F->getCanonicalDecl(); + assert(F); auto It = Funcs.find(F); return It == Funcs.end() ? nullptr : It->second.get(); } -llvm::Expected<Function *> Program::getOrCreateFunction(const FunctionDecl *F) { - if (Function *Func = getFunction(F)) { - return Func; - } - - // Try to compile the function if it wasn't compiled yet. - if (const FunctionDecl *FD = F->getDefinition()) - return ByteCodeStmtGen<ByteCodeEmitter>(Ctx, *this).compileFunc(FD); - - // A relocation which traps if not resolved. 
- return nullptr; -} - Record *Program::getOrCreateRecord(const RecordDecl *RD) { // Use the actual definition as a key. RD = RD->getDefinition(); if (!RD) return nullptr; + if (!RD->isCompleteDefinition()) + return nullptr; + // Deduplicate records. - auto It = Records.find(RD); - if (It != Records.end()) { + if (auto It = Records.find(RD); It != Records.end()) return It->second; - } + + // We insert nullptr now and replace that later, so recursive calls + // to this function with the same RecordDecl don't run into + // infinite recursion. + Records.insert({RD, nullptr}); // Number of bytes required by fields and base classes. - unsigned Size = 0; + unsigned BaseSize = 0; // Number of bytes required by virtual base. unsigned VirtSize = 0; // Helper to get a base descriptor. - auto GetBaseDesc = [this](const RecordDecl *BD, Record *BR) -> Descriptor * { + auto GetBaseDesc = [this](const RecordDecl *BD, + const Record *BR) -> const Descriptor * { if (!BR) return nullptr; - return allocateDescriptor(BD, BR, /*isConst=*/false, + return allocateDescriptor(BD, BR, std::nullopt, /*isConst=*/false, /*isTemporary=*/false, /*isMutable=*/false); }; @@ -233,131 +287,158 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) { // Reserve space for base classes. Record::BaseList Bases; Record::VirtualBaseList VirtBases; - if (auto *CD = dyn_cast<CXXRecordDecl>(RD)) { + if (const auto *CD = dyn_cast<CXXRecordDecl>(RD)) { for (const CXXBaseSpecifier &Spec : CD->bases()) { if (Spec.isVirtual()) continue; - const RecordDecl *BD = Spec.getType()->castAs<RecordType>()->getDecl(); - Record *BR = getOrCreateRecord(BD); - if (Descriptor *Desc = GetBaseDesc(BD, BR)) { - Size += align(sizeof(InlineDescriptor)); - Bases.push_back({BD, Size, Desc, BR}); - Size += align(BR->getSize()); - continue; - } - return nullptr; + // In error cases, the base might not be a RecordType. + const auto *RT = Spec.getType()->getAs<RecordType>(); + if (!RT) + return nullptr; + const RecordDecl *BD = RT->getDecl(); + const Record *BR = getOrCreateRecord(BD); + + const Descriptor *Desc = GetBaseDesc(BD, BR); + if (!Desc) + return nullptr; + + BaseSize += align(sizeof(InlineDescriptor)); + Bases.push_back({BD, BaseSize, Desc, BR}); + BaseSize += align(BR->getSize()); } for (const CXXBaseSpecifier &Spec : CD->vbases()) { - const RecordDecl *BD = Spec.getType()->castAs<RecordType>()->getDecl(); - Record *BR = getOrCreateRecord(BD); + const auto *RT = Spec.getType()->getAs<RecordType>(); + if (!RT) + return nullptr; - if (Descriptor *Desc = GetBaseDesc(BD, BR)) { - VirtSize += align(sizeof(InlineDescriptor)); - VirtBases.push_back({BD, VirtSize, Desc, BR}); - VirtSize += align(BR->getSize()); - continue; - } - return nullptr; + const RecordDecl *BD = RT->getDecl(); + const Record *BR = getOrCreateRecord(BD); + + const Descriptor *Desc = GetBaseDesc(BD, BR); + if (!Desc) + return nullptr; + + VirtSize += align(sizeof(InlineDescriptor)); + VirtBases.push_back({BD, VirtSize, Desc, BR}); + VirtSize += align(BR->getSize()); } } // Reserve space for fields. Record::FieldList Fields; for (const FieldDecl *FD : RD->fields()) { + // Note that we DO create fields and descriptors + // for unnamed bitfields here, even though we later ignore + // them everywhere. That's so the FieldDecl's getFieldIndex() matches. + // Reserve space for the field's descriptor and the offset. - Size += align(sizeof(InlineDescriptor)); + BaseSize += align(sizeof(InlineDescriptor)); // Classify the field and add its metadata. 
QualType FT = FD->getType(); const bool IsConst = FT.isConstQualified(); const bool IsMutable = FD->isMutable(); - Descriptor *Desc; - if (llvm::Optional<PrimType> T = Ctx.classify(FT)) { - Desc = createDescriptor(FD, *T, IsConst, /*isTemporary=*/false, - IsMutable); + const Descriptor *Desc; + if (std::optional<PrimType> T = Ctx.classify(FT)) { + Desc = createDescriptor(FD, *T, std::nullopt, IsConst, + /*isTemporary=*/false, IsMutable); } else { - Desc = createDescriptor(FD, FT.getTypePtr(), IsConst, + Desc = createDescriptor(FD, FT.getTypePtr(), std::nullopt, IsConst, /*isTemporary=*/false, IsMutable); } if (!Desc) return nullptr; - Fields.push_back({FD, Size, Desc}); - Size += align(Desc->getAllocSize()); + Fields.push_back({FD, BaseSize, Desc}); + BaseSize += align(Desc->getAllocSize()); } Record *R = new (Allocator) Record(RD, std::move(Bases), std::move(Fields), - std::move(VirtBases), VirtSize, Size); - Records.insert({RD, R}); + std::move(VirtBases), VirtSize, BaseSize); + Records[RD] = R; return R; } Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty, + Descriptor::MetadataSize MDSize, bool IsConst, bool IsTemporary, - bool IsMutable) { + bool IsMutable, const Expr *Init) { + // Classes and structures. - if (auto *RT = Ty->getAs<RecordType>()) { - if (auto *Record = getOrCreateRecord(RT->getDecl())) - return allocateDescriptor(D, Record, IsConst, IsTemporary, IsMutable); + if (const auto *RT = Ty->getAs<RecordType>()) { + if (const auto *Record = getOrCreateRecord(RT->getDecl())) + return allocateDescriptor(D, Record, MDSize, IsConst, IsTemporary, + IsMutable); } // Arrays. - if (auto ArrayType = Ty->getAsArrayTypeUnsafe()) { + if (const auto ArrayType = Ty->getAsArrayTypeUnsafe()) { QualType ElemTy = ArrayType->getElementType(); // Array of well-known bounds. if (auto CAT = dyn_cast<ConstantArrayType>(ArrayType)) { - size_t NumElems = CAT->getSize().getZExtValue(); - if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) { + size_t NumElems = CAT->getZExtSize(); + if (std::optional<PrimType> T = Ctx.classify(ElemTy)) { // Arrays of primitives. unsigned ElemSize = primSize(*T); if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems) { return {}; } - return allocateDescriptor(D, *T, NumElems, IsConst, IsTemporary, + return allocateDescriptor(D, *T, MDSize, NumElems, IsConst, IsTemporary, IsMutable); } else { // Arrays of composites. In this case, the array is a list of pointers, // followed by the actual elements. - Descriptor *Desc = - createDescriptor(D, ElemTy.getTypePtr(), IsConst, IsTemporary); - if (!Desc) + const Descriptor *ElemDesc = createDescriptor( + D, ElemTy.getTypePtr(), std::nullopt, IsConst, IsTemporary); + if (!ElemDesc) return nullptr; - InterpSize ElemSize = Desc->getAllocSize() + sizeof(InlineDescriptor); + unsigned ElemSize = + ElemDesc->getAllocSize() + sizeof(InlineDescriptor); if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems) return {}; - return allocateDescriptor(D, Desc, NumElems, IsConst, IsTemporary, - IsMutable); + return allocateDescriptor(D, ElemDesc, MDSize, NumElems, IsConst, + IsTemporary, IsMutable); } } // Array of unknown bounds - cannot be accessed and pointer arithmetic // is forbidden on pointers to such objects. 
- if (isa<IncompleteArrayType>(ArrayType)) { - if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) { - return allocateDescriptor(D, *T, IsTemporary, + if (isa<IncompleteArrayType>(ArrayType) || + isa<VariableArrayType>(ArrayType)) { + if (std::optional<PrimType> T = Ctx.classify(ElemTy)) { + return allocateDescriptor(D, *T, MDSize, IsTemporary, Descriptor::UnknownSize{}); } else { - Descriptor *Desc = - createDescriptor(D, ElemTy.getTypePtr(), IsConst, IsTemporary); + const Descriptor *Desc = createDescriptor(D, ElemTy.getTypePtr(), + MDSize, IsConst, IsTemporary); if (!Desc) return nullptr; - return allocateDescriptor(D, Desc, IsTemporary, + return allocateDescriptor(D, Desc, MDSize, IsTemporary, Descriptor::UnknownSize{}); } } } // Atomic types. - if (auto *AT = Ty->getAs<AtomicType>()) { + if (const auto *AT = Ty->getAs<AtomicType>()) { const Type *InnerTy = AT->getValueType().getTypePtr(); - return createDescriptor(D, InnerTy, IsConst, IsTemporary, IsMutable); + return createDescriptor(D, InnerTy, MDSize, IsConst, IsTemporary, + IsMutable); } // Complex types - represented as arrays of elements. - if (auto *CT = Ty->getAs<ComplexType>()) { + if (const auto *CT = Ty->getAs<ComplexType>()) { PrimType ElemTy = *Ctx.classify(CT->getElementType()); - return allocateDescriptor(D, ElemTy, 2, IsConst, IsTemporary, IsMutable); + return allocateDescriptor(D, ElemTy, MDSize, 2, IsConst, IsTemporary, + IsMutable); + } + + // Same with vector types. + if (const auto *VT = Ty->getAs<VectorType>()) { + PrimType ElemTy = *Ctx.classify(VT->getElementType()); + return allocateDescriptor(D, ElemTy, MDSize, VT->getNumElements(), IsConst, + IsTemporary, IsMutable); } return nullptr; diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.h b/contrib/llvm-project/clang/lib/AST/Interp/Program.h index 5f0012db9b3f..1cabc5212180 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Program.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.h @@ -29,26 +29,45 @@ namespace clang { class RecordDecl; class Expr; class FunctionDecl; -class Stmt; class StringLiteral; class VarDecl; namespace interp { class Context; -class State; -class Record; -class Scope; /// The program contains and links the bytecode for all functions. -class Program { +class Program final { public: Program(Context &Ctx) : Ctx(Ctx) {} + ~Program() { + // Manually destroy all the blocks. They are almost all harmless, + // but primitive arrays might have an InitMap* heap allocated and + // that needs to be freed. + for (Global *G : Globals) + if (Block *B = G->block(); B->isInitialized()) + B->invokeDtor(); + + // Records might actually allocate memory themselves, but they + // are allocated using a BumpPtrAllocator. Call their desctructors + // here manually so they are properly freeing their resources. + for (auto RecordPair : Records) { + if (Record *R = RecordPair.second) + R->~Record(); + } + } + + /// Marshals a native pointer to an ID for embedding in bytecode. + unsigned getOrCreateNativePointer(const void *Ptr); + + /// Returns the value of a marshalled native pointer. + const void *getNativePointer(unsigned Idx); + /// Emits a string literal among global data. unsigned createGlobalString(const StringLiteral *S); /// Returns a pointer to a global. - Pointer getPtrGlobal(unsigned Idx); + Pointer getPtrGlobal(unsigned Idx) const; /// Returns the value of a global. Block *getGlobal(unsigned Idx) { @@ -57,23 +76,26 @@ public: } /// Finds a global's index. 
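The native-pointer marshalling pair declared just above (and implemented earlier in Program.cpp) gives the bytecode a small, deduplicated integer id for an arbitrary host pointer. A minimal sketch of the round trip, with made-up names:

    // P is a clang::interp::Program, D is any host pointer worth embedding.
    void roundTrip(clang::interp::Program &P, const clang::Decl *D) {
      unsigned Id = P.getOrCreateNativePointer(D); // same pointer always yields the same id
      assert(P.getOrCreateNativePointer(D) == Id);
      assert(P.getNativePointer(Id) == D);         // recovers the original pointer
    }

The global lookup and creation interface continues below.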
- llvm::Optional<unsigned> getGlobal(const ValueDecl *VD); + std::optional<unsigned> getGlobal(const ValueDecl *VD); + std::optional<unsigned> getGlobal(const Expr *E); /// Returns or creates a global an creates an index to it. - llvm::Optional<unsigned> getOrCreateGlobal(const ValueDecl *VD); + std::optional<unsigned> getOrCreateGlobal(const ValueDecl *VD, + const Expr *Init = nullptr); - /// Returns or creates a dummy value for parameters. - llvm::Optional<unsigned> getOrCreateDummy(const ParmVarDecl *PD); + /// Returns or creates a dummy value for unknown declarations. + std::optional<unsigned> getOrCreateDummy(const ValueDecl *VD); /// Creates a global and returns its index. - llvm::Optional<unsigned> createGlobal(const ValueDecl *VD); + std::optional<unsigned> createGlobal(const ValueDecl *VD, const Expr *Init); /// Creates a global from a lifetime-extended temporary. - llvm::Optional<unsigned> createGlobal(const Expr *E); + std::optional<unsigned> createGlobal(const Expr *E); /// Creates a new function from a code range. template <typename... Ts> Function *createFunction(const FunctionDecl *Def, Ts &&... Args) { + Def = Def->getCanonicalDecl(); auto *Func = new Function(*this, Def, std::forward<Ts>(Args)...); Funcs.insert({Def, std::unique_ptr<Function>(Func)}); return Func; @@ -89,31 +111,30 @@ public: /// Returns a function. Function *getFunction(const FunctionDecl *F); - /// Returns a pointer to a function if it exists and can be compiled. - /// If a function couldn't be compiled, an error is returned. - /// If a function was not yet defined, a null pointer is returned. - llvm::Expected<Function *> getOrCreateFunction(const FunctionDecl *F); - /// Returns a record or creates one if it does not exist. Record *getOrCreateRecord(const RecordDecl *RD); /// Creates a descriptor for a primitive type. Descriptor *createDescriptor(const DeclTy &D, PrimType Type, - bool IsConst = false, - bool IsTemporary = false, + Descriptor::MetadataSize MDSize = std::nullopt, + bool IsConst = false, bool IsTemporary = false, bool IsMutable = false) { - return allocateDescriptor(D, Type, IsConst, IsTemporary, IsMutable); + return allocateDescriptor(D, Type, MDSize, IsConst, IsTemporary, IsMutable); } /// Creates a descriptor for a composite type. Descriptor *createDescriptor(const DeclTy &D, const Type *Ty, + Descriptor::MetadataSize MDSize = std::nullopt, bool IsConst = false, bool IsTemporary = false, - bool IsMutable = false); + bool IsMutable = false, + const Expr *Init = nullptr); /// Context to manage declaration lifetimes. class DeclScope { public: - DeclScope(Program &P, const VarDecl *VD) : P(P) { P.startDeclaration(VD); } + DeclScope(Program &P, const ValueDecl *VD) : P(P) { + P.startDeclaration(VD); + } ~DeclScope() { P.endDeclaration(); } private: @@ -121,17 +142,18 @@ public: }; /// Returns the current declaration ID. - llvm::Optional<unsigned> getCurrentDecl() const { + std::optional<unsigned> getCurrentDecl() const { if (CurrentDeclaration == NoDeclaration) - return llvm::Optional<unsigned>{}; + return std::optional<unsigned>{}; return LastDeclaration; } private: friend class DeclScope; - llvm::Optional<unsigned> createGlobal(const DeclTy &D, QualType Ty, - bool IsStatic, bool IsExtern); + std::optional<unsigned> createGlobal(const DeclTy &D, QualType Ty, + bool IsStatic, bool IsExtern, + const Expr *Init = nullptr); /// Reference to the VM context. Context &Ctx; @@ -143,6 +165,11 @@ private: /// Function relocation locations. 
llvm::DenseMap<const FunctionDecl *, std::vector<unsigned>> Relocs; + /// Native pointers referenced by bytecode. + std::vector<const void *> NativePointers; + /// Cached native pointer indices. + llvm::DenseMap<const void *, unsigned> NativePointerIndices; + /// Custom allocator for global storage. using PoolAllocTy = llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator>; @@ -161,9 +188,10 @@ private: } /// Return a pointer to the data. - char *data() { return B.data(); } + std::byte *data() { return B.data(); } /// Return a pointer to the block. Block *block() { return &B; } + const Block *block() const { return &B; } private: /// Required metadata - does not actually track pointers. @@ -182,7 +210,7 @@ private: llvm::DenseMap<const RecordDecl *, Record *> Records; /// Dummy parameter to generate pointers from. - llvm::DenseMap<const ParmVarDecl *, unsigned> DummyParams; + llvm::DenseMap<const ValueDecl *, unsigned> DummyVariables; /// Creates a new descriptor. template <typename... Ts> @@ -198,7 +226,7 @@ private: unsigned CurrentDeclaration = NoDeclaration; /// Starts evaluating a declaration. - void startDeclaration(const VarDecl *Decl) { + void startDeclaration(const ValueDecl *Decl) { LastDeclaration += 1; CurrentDeclaration = LastDeclaration; } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp index f440c4705051..ac01524e1caf 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp @@ -7,6 +7,7 @@ //===----------------------------------------------------------------------===// #include "Record.h" +#include "clang/AST/ASTContext.h" using namespace clang; using namespace clang::interp; @@ -15,7 +16,7 @@ Record::Record(const RecordDecl *Decl, BaseList &&SrcBases, FieldList &&SrcFields, VirtualBaseList &&SrcVirtualBases, unsigned VirtualSize, unsigned BaseSize) : Decl(Decl), Bases(std::move(SrcBases)), Fields(std::move(SrcFields)), - BaseSize(BaseSize), VirtualSize(VirtualSize) { + BaseSize(BaseSize), VirtualSize(VirtualSize), IsUnion(Decl->isUnion()) { for (Base &V : SrcVirtualBases) VirtualBases.push_back({ V.Decl, V.Offset + BaseSize, V.Desc, V.R }); @@ -27,6 +28,14 @@ Record::Record(const RecordDecl *Decl, BaseList &&SrcBases, VirtualBaseMap[V.Decl] = &V; } +const std::string Record::getName() const { + std::string Ret; + llvm::raw_string_ostream OS(Ret); + Decl->getNameForDiagnostic(OS, Decl->getASTContext().getPrintingPolicy(), + /*Qualified=*/true); + return Ret; +} + const Record::Field *Record::getField(const FieldDecl *FD) const { auto It = FieldMap.find(FD); assert(It != FieldMap.end() && "Missing field"); @@ -39,6 +48,14 @@ const Record::Base *Record::getBase(const RecordDecl *FD) const { return It->second; } +const Record::Base *Record::getBase(QualType T) const { + if (auto *RT = T->getAs<RecordType>()) { + const RecordDecl *RD = RT->getDecl(); + return BaseMap.lookup(RD); + } + return nullptr; +} + const Record::Base *Record::getVirtualBase(const RecordDecl *FD) const { auto It = VirtualBaseMap.find(FD); assert(It != VirtualBaseMap.end() && "Missing virtual base"); diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.h b/contrib/llvm-project/clang/lib/AST/Interp/Record.h index 9cdee9003752..83e15b125f77 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Record.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.h @@ -13,28 +13,31 @@ #ifndef LLVM_CLANG_AST_INTERP_RECORD_H #define LLVM_CLANG_AST_INTERP_RECORD_H -#include 
"Pointer.h" +#include "Descriptor.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" namespace clang { namespace interp { class Program; /// Structure/Class descriptor. -class Record { +class Record final { public: /// Describes a record field. struct Field { const FieldDecl *Decl; unsigned Offset; - Descriptor *Desc; + const Descriptor *Desc; + bool isBitField() const { return Decl->isBitField(); } }; /// Describes a base class. struct Base { const RecordDecl *Decl; unsigned Offset; - Descriptor *Desc; - Record *R; + const Descriptor *Desc; + const Record *R; }; /// Mapping from identifiers to field descriptors. @@ -47,8 +50,10 @@ public: public: /// Returns the underlying declaration. const RecordDecl *getDecl() const { return Decl; } + /// Returns the name of the underlying declaration. + const std::string getName() const; /// Checks if the record is a union. - bool isUnion() const { return getDecl()->isUnion(); } + bool isUnion() const { return IsUnion; } /// Returns the size of the record. unsigned getSize() const { return BaseSize; } /// Returns the full size of the record, including records. @@ -57,32 +62,47 @@ public: const Field *getField(const FieldDecl *FD) const; /// Returns a base descriptor. const Base *getBase(const RecordDecl *FD) const; + /// Returns a base descriptor. + const Base *getBase(QualType T) const; /// Returns a virtual base descriptor. const Base *getVirtualBase(const RecordDecl *RD) const; + /// Returns the destructor of the record, if any. + const CXXDestructorDecl *getDestructor() const { + if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(Decl)) + return CXXDecl->getDestructor(); + return nullptr; + } using const_field_iter = FieldList::const_iterator; llvm::iterator_range<const_field_iter> fields() const { return llvm::make_range(Fields.begin(), Fields.end()); } - unsigned getNumFields() { return Fields.size(); } - Field *getField(unsigned I) { return &Fields[I]; } + unsigned getNumFields() const { return Fields.size(); } + const Field *getField(unsigned I) const { return &Fields[I]; } using const_base_iter = BaseList::const_iterator; llvm::iterator_range<const_base_iter> bases() const { return llvm::make_range(Bases.begin(), Bases.end()); } - unsigned getNumBases() { return Bases.size(); } - Base *getBase(unsigned I) { return &Bases[I]; } + unsigned getNumBases() const { return Bases.size(); } + const Base *getBase(unsigned I) const { + assert(I < getNumBases()); + return &Bases[I]; + } using const_virtual_iter = VirtualBaseList::const_iterator; llvm::iterator_range<const_virtual_iter> virtual_bases() const { return llvm::make_range(VirtualBases.begin(), VirtualBases.end()); } - unsigned getNumVirtualBases() { return VirtualBases.size(); } - Base *getVirtualBase(unsigned I) { return &VirtualBases[I]; } + unsigned getNumVirtualBases() const { return VirtualBases.size(); } + const Base *getVirtualBase(unsigned I) const { return &VirtualBases[I]; } + + void dump(llvm::raw_ostream &OS, unsigned Indentation = 0, + unsigned Offset = 0) const; + void dump() const { dump(llvm::errs()); } private: /// Constructor used by Program to create record descriptors. @@ -103,16 +123,17 @@ private: VirtualBaseList VirtualBases; /// Mapping from declarations to bases. - llvm::DenseMap<const RecordDecl *, Base *> BaseMap; + llvm::DenseMap<const RecordDecl *, const Base *> BaseMap; /// Mapping from field identifiers to descriptors. 
- llvm::DenseMap<const FieldDecl *, Field *> FieldMap; + llvm::DenseMap<const FieldDecl *, const Field *> FieldMap; /// Mapping from declarations to virtual bases. llvm::DenseMap<const RecordDecl *, Base *> VirtualBaseMap; - /// Mapping from /// Size of the structure. unsigned BaseSize; /// Size of all virtual bases. unsigned VirtualSize; + /// If this record is a union. + bool IsUnion; }; } // namespace interp diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp index 4bec87812638..45cd0ad4fd42 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp @@ -22,18 +22,32 @@ SourceLocation SourceInfo::getLoc() const { return SourceLocation(); } +SourceRange SourceInfo::getRange() const { + if (const Expr *E = asExpr()) + return E->getSourceRange(); + if (const Stmt *S = asStmt()) + return S->getSourceRange(); + if (const Decl *D = asDecl()) + return D->getSourceRange(); + return SourceRange(); +} + const Expr *SourceInfo::asExpr() const { - if (auto *S = Source.dyn_cast<const Stmt *>()) + if (const auto *S = Source.dyn_cast<const Stmt *>()) return dyn_cast<Expr>(S); return nullptr; } -const Expr *SourceMapper::getExpr(Function *F, CodePtr PC) const { +const Expr *SourceMapper::getExpr(const Function *F, CodePtr PC) const { if (const Expr *E = getSource(F, PC).asExpr()) return E; llvm::report_fatal_error("missing source expression"); } -SourceLocation SourceMapper::getLocation(Function *F, CodePtr PC) const { +SourceLocation SourceMapper::getLocation(const Function *F, CodePtr PC) const { return getSource(F, PC).getLoc(); } + +SourceRange SourceMapper::getRange(const Function *F, CodePtr PC) const { + return getSource(F, PC).getRange(); +} diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Source.h b/contrib/llvm-project/clang/lib/AST/Interp/Source.h index 19c652b7331a..c28b488ff554 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/Source.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/Source.h @@ -13,16 +13,21 @@ #ifndef LLVM_CLANG_AST_INTERP_SOURCE_H #define LLVM_CLANG_AST_INTERP_SOURCE_H -#include "clang/AST/Decl.h" +#include "PrimType.h" +#include "clang/AST/DeclBase.h" #include "clang/AST/Stmt.h" +#include "llvm/ADT/PointerUnion.h" #include "llvm/Support/Endian.h" namespace clang { +class Expr; +class SourceLocation; +class SourceRange; namespace interp { class Function; /// Pointer into the code segment. -class CodePtr { +class CodePtr final { public: CodePtr() : Ptr(nullptr) {} @@ -42,49 +47,36 @@ public: } bool operator!=(const CodePtr &RHS) const { return Ptr != RHS.Ptr; } + const std::byte *operator*() const { return Ptr; } - /// Reads data and advances the pointer. - template <typename T> T read() { - T Value = ReadHelper<T>(Ptr); - Ptr += sizeof(T); - return Value; - } + operator bool() const { return Ptr; } -private: - /// Constructor used by Function to generate pointers. - CodePtr(const char *Ptr) : Ptr(Ptr) {} - - /// Helper to decode a value or a pointer. - template <typename T> - static std::enable_if_t<!std::is_pointer<T>::value, T> - ReadHelper(const char *Ptr) { - using namespace llvm::support; - return endian::read<T, endianness::native, 1>(Ptr); - } - - template <typename T> - static std::enable_if_t<std::is_pointer<T>::value, T> - ReadHelper(const char *Ptr) { + /// Reads data and advances the pointer. 
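The read<T>() rewrite below drops the old pointer-punning ReadHelper: operands are decoded with llvm::support::endian and the cursor then advances by align(sizeof(T)), so it always stays pointer-aligned, matching the align()/aligned() helpers added to PrimType.h. For example, under the usual 8-byte pointer alignment a sketch such as

    uint32_t Off = PC.read<uint32_t>(); // decodes 4 bytes, then advances PC by align(4) == 8

moves the code pointer eight bytes, not four.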
+ template <typename T> std::enable_if_t<!std::is_pointer<T>::value, T> read() { + assert(aligned(Ptr)); using namespace llvm::support; - auto Punned = endian::read<uintptr_t, endianness::native, 1>(Ptr); - return reinterpret_cast<T>(Punned); + T Value = endian::read<T, llvm::endianness::native>(Ptr); + Ptr += align(sizeof(T)); + return Value; } private: friend class Function; - + /// Constructor used by Function to generate pointers. + CodePtr(const std::byte *Ptr) : Ptr(Ptr) {} /// Pointer into the code owned by a function. - const char *Ptr; + const std::byte *Ptr; }; /// Describes the statement/declaration an opcode was generated from. -class SourceInfo { +class SourceInfo final { public: SourceInfo() {} SourceInfo(const Stmt *E) : Source(E) {} SourceInfo(const Decl *D) : Source(D) {} SourceLocation getLoc() const; + SourceRange getRange() const; const Stmt *asStmt() const { return Source.dyn_cast<const Stmt *>(); } const Decl *asDecl() const { return Source.dyn_cast<const Decl *>(); } @@ -104,12 +96,13 @@ public: virtual ~SourceMapper() {} /// Returns source information for a given PC in a function. - virtual SourceInfo getSource(Function *F, CodePtr PC) const = 0; + virtual SourceInfo getSource(const Function *F, CodePtr PC) const = 0; /// Returns the expression if an opcode belongs to one, null otherwise. - const Expr *getExpr(Function *F, CodePtr PC) const; + const Expr *getExpr(const Function *F, CodePtr PC) const; /// Returns the location from which an opcode originates. - SourceLocation getLocation(Function *F, CodePtr PC) const; + SourceLocation getLocation(const Function *F, CodePtr PC) const; + SourceRange getRange(const Function *F, CodePtr PC) const; }; } // namespace interp diff --git a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp index 56774f88fb45..0d9dadec4b95 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp +++ b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp @@ -11,6 +11,7 @@ #include "Program.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CXXInheritance.h" +#include "clang/AST/OptionalDiagnostic.h" using namespace clang; using namespace clang::interp; @@ -125,16 +126,16 @@ void State::addCallStack(unsigned Limit) { // Walk the call stack and add the diagnostics. unsigned CallIdx = 0; - Frame *Top = getCurrentFrame(); + const Frame *Top = getCurrentFrame(); const Frame *Bottom = getBottomFrame(); - for (Frame *F = Top; F != Bottom; F = F->getCaller(), ++CallIdx) { - SourceLocation CallLocation = F->getCallLocation(); + for (const Frame *F = Top; F != Bottom; F = F->getCaller(), ++CallIdx) { + SourceRange CallRange = F->getCallRange(); // Skip this call? if (CallIdx >= SkipStart && CallIdx < SkipEnd) { if (CallIdx == SkipStart) { // Note that we're skipping calls. - addDiag(CallLocation, diag::note_constexpr_calls_suppressed) + addDiag(CallRange.getBegin(), diag::note_constexpr_calls_suppressed) << unsigned(ActiveCalls - Limit); } continue; @@ -142,17 +143,20 @@ void State::addCallStack(unsigned Limit) { // Use a different note for an inheriting constructor, because from the // user's perspective it's not really a function at all. 
- if (auto *CD = dyn_cast_or_null<CXXConstructorDecl>(F->getCallee())) { - if (CD->isInheritingConstructor()) { - addDiag(CallLocation, diag::note_constexpr_inherited_ctor_call_here) - << CD->getParent(); - continue; - } + if (const auto *CD = + dyn_cast_if_present<CXXConstructorDecl>(F->getCallee()); + CD && CD->isInheritingConstructor()) { + addDiag(CallRange.getBegin(), + diag::note_constexpr_inherited_ctor_call_here) + << CD->getParent(); + continue; } SmallString<128> Buffer; llvm::raw_svector_ostream Out(Buffer); F->describe(Out); - addDiag(CallLocation, diag::note_constexpr_call_here) << Out.str(); + if (!Buffer.empty()) + addDiag(CallRange.getBegin(), diag::note_constexpr_call_here) + << Out.str() << CallRange; } } diff --git a/contrib/llvm-project/clang/lib/AST/Interp/State.h b/contrib/llvm-project/clang/lib/AST/Interp/State.h index d9a645a3eb3e..f1e8e3618f34 100644 --- a/contrib/llvm-project/clang/lib/AST/Interp/State.h +++ b/contrib/llvm-project/clang/lib/AST/Interp/State.h @@ -15,9 +15,9 @@ #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/Expr.h" -#include "clang/AST/OptionalDiagnostic.h" namespace clang { +class OptionalDiagnostic; /// Kinds of access we can perform on an object, for diagnostics. Note that /// we consider a member function call to be a kind of access, even though @@ -36,7 +36,7 @@ enum AccessKinds { AK_Destroy, }; -// The order of this enum is important for diagnostics. +/// The order of this enum is important for diagnostics. enum CheckSubobjectKind { CSK_Base, CSK_Derived, @@ -71,7 +71,8 @@ public: virtual unsigned getCallStackDepth() = 0; public: - // Diagnose that the evaluation could not be folded (FF => FoldFailure) + State() = default; + /// Diagnose that the evaluation could not be folded (FF => FoldFailure) OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId = diag::note_invalid_subexpr_in_const_expr, @@ -118,6 +119,10 @@ public: const LangOptions &getLangOpts() const; + /// Whether or not we're in a context where the front end requires a + /// constant value. 
+ bool InConstantContext = false; + private: void addCallStack(unsigned Limit); diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp index be10258a2d77..c9aadce73141 100644 --- a/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp +++ b/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp @@ -26,6 +26,7 @@ #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/iterator.h" +#include <optional> using namespace clang; @@ -84,8 +85,8 @@ template<typename T> bool isDenseMapKeyTombstone(T V) { V, llvm::DenseMapInfo<T>::getTombstoneKey()); } -template<typename T> -Optional<bool> areDenseMapKeysEqualSpecialValues(T LHS, T RHS) { +template <typename T> +std::optional<bool> areDenseMapKeysEqualSpecialValues(T LHS, T RHS) { bool LHSEmpty = isDenseMapKeyEmpty(LHS); bool RHSEmpty = isDenseMapKeyEmpty(RHS); if (LHSEmpty || RHSEmpty) @@ -96,7 +97,7 @@ Optional<bool> areDenseMapKeysEqualSpecialValues(T LHS, T RHS) { if (LHSTombstone || RHSTombstone) return LHSTombstone && RHSTombstone; - return None; + return std::nullopt; } template<> @@ -113,8 +114,8 @@ struct DenseMapInfo<DecompositionDeclName> { return llvm::hash_combine_range(Key.begin(), Key.end()); } static bool isEqual(DecompositionDeclName LHS, DecompositionDeclName RHS) { - if (Optional<bool> Result = areDenseMapKeysEqualSpecialValues( - LHS.Bindings, RHS.Bindings)) + if (std::optional<bool> Result = + areDenseMapKeysEqualSpecialValues(LHS.Bindings, RHS.Bindings)) return *Result; return LHS.Bindings.size() == RHS.Bindings.size() && @@ -181,6 +182,37 @@ public: } }; +// A version of this for SYCL that makes sure that 'device' mangling context +// matches the lambda mangling number, so that __builtin_sycl_unique_stable_name +// can be consistently generated between a MS and Itanium host by just referring +// to the device mangling number. 
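The intent of the ItaniumSYCLNumberingContext defined just below is that the device-side mangling number simply replays whatever number the Itanium context handed out on the host side. A hypothetical call sequence (names invented for illustration):

    // Ctx is an ItaniumSYCLNumberingContext, CallOp a lambda's call operator.
    unsigned N = Ctx.getManglingNumber(CallOp);        // records N for CallOp
    assert(Ctx.getDeviceManglingNumber(CallOp) == N);  // replays the same N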
+class ItaniumSYCLNumberingContext : public ItaniumNumberingContext { + llvm::DenseMap<const CXXMethodDecl *, unsigned> ManglingNumbers; + using ManglingItr = decltype(ManglingNumbers)::iterator; + +public: + ItaniumSYCLNumberingContext(ItaniumMangleContext *Mangler) + : ItaniumNumberingContext(Mangler) {} + + unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override { + unsigned Number = ItaniumNumberingContext::getManglingNumber(CallOperator); + std::pair<ManglingItr, bool> emplace_result = + ManglingNumbers.try_emplace(CallOperator, Number); + (void)emplace_result; + assert(emplace_result.second && "Lambda number set multiple times?"); + return Number; + } + + using ItaniumNumberingContext::getManglingNumber; + + unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override { + ManglingItr Itr = ManglingNumbers.find(CallOperator); + assert(Itr != ManglingNumbers.end() && "Lambda not yet mangled?"); + + return Itr->second; + } +}; + class ItaniumCXXABI : public CXXABI { private: std::unique_ptr<MangleContext> Mangler; @@ -193,7 +225,7 @@ public: MemberPointerInfo getMemberPointerInfo(const MemberPointerType *MPT) const override { const TargetInfo &Target = Context.getTargetInfo(); - TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0); + TargetInfo::IntType PtrDiff = Target.getPtrDiffType(LangAS::Default); MemberPointerInfo MPI; MPI.Width = Target.getTypeWidth(PtrDiff); MPI.Align = Target.getTypeAlign(PtrDiff); @@ -220,8 +252,8 @@ public: return false; const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - CharUnits PointerSize = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); + CharUnits PointerSize = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerWidth(LangAS::Default)); return Layout.getNonVirtualSize() == PointerSize; } @@ -249,6 +281,9 @@ public: std::unique_ptr<MangleNumberingContext> createMangleNumberingContext() const override { + if (Context.getLangOpts().isSYCL()) + return std::make_unique<ItaniumSYCLNumberingContext>( + cast<ItaniumMangleContext>(Mangler.get())); return std::make_unique<ItaniumNumberingContext>( cast<ItaniumMangleContext>(Mangler.get())); } diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp index 8cbac66fcf00..d46d621d4c7d 100644 --- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp +++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp @@ -28,6 +28,7 @@ #include "clang/AST/Mangle.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ABI.h" +#include "clang/Basic/DiagnosticAST.h" #include "clang/Basic/Module.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" @@ -35,70 +36,17 @@ #include "llvm/ADT/StringExtras.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/TargetParser/RISCVTargetParser.h" +#include <optional> using namespace clang; namespace { -/// Retrieve the declaration context that should be used when mangling the given -/// declaration. -static const DeclContext *getEffectiveDeclContext(const Decl *D) { - // The ABI assumes that lambda closure types that occur within - // default arguments live in the context of the function. However, due to - // the way in which Clang parses and creates function declarations, this is - // not the case: the lambda closure type ends up living in the context - // where the function itself resides, because the function declaration itself - // had not yet been created. 
Fix the context here. - if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { - if (RD->isLambda()) - if (ParmVarDecl *ContextParam - = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl())) - return ContextParam->getDeclContext(); - } - - // Perform the same check for block literals. - if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { - if (ParmVarDecl *ContextParam - = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl())) - return ContextParam->getDeclContext(); - } - - const DeclContext *DC = D->getDeclContext(); - if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC) || - isa<OMPDeclareMapperDecl>(DC)) { - return getEffectiveDeclContext(cast<Decl>(DC)); - } - - if (const auto *VD = dyn_cast<VarDecl>(D)) - if (VD->isExternC()) - return VD->getASTContext().getTranslationUnitDecl(); - - if (const auto *FD = dyn_cast<FunctionDecl>(D)) - if (FD->isExternC()) - return FD->getASTContext().getTranslationUnitDecl(); - - return DC->getRedeclContext(); -} - -static const DeclContext *getEffectiveParentContext(const DeclContext *DC) { - return getEffectiveDeclContext(cast<Decl>(DC)); -} - static bool isLocalContainerContext(const DeclContext *DC) { return isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC) || isa<BlockDecl>(DC); } -static const RecordDecl *GetLocalClassDecl(const Decl *D) { - const DeclContext *DC = getEffectiveDeclContext(D); - while (!DC->isNamespace() && !DC->isTranslationUnit()) { - if (isLocalContainerContext(DC)) - return dyn_cast<RecordDecl>(D); - D = cast<Decl>(DC); - DC = getEffectiveDeclContext(D); - } - return nullptr; -} - static const FunctionDecl *getStructor(const FunctionDecl *fn) { if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate()) return ftd->getTemplatedDecl(); @@ -126,14 +74,15 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext { llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator; llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier; const DiscriminatorOverrideTy DiscriminatorOverride = nullptr; + NamespaceDecl *StdNamespace = nullptr; bool NeedsUniqueInternalLinkageNames = false; public: explicit ItaniumMangleContextImpl( ASTContext &Context, DiagnosticsEngine &Diags, - DiscriminatorOverrideTy DiscriminatorOverride) - : ItaniumMangleContext(Context, Diags), + DiscriminatorOverrideTy DiscriminatorOverride, bool IsAux = false) + : ItaniumMangleContext(Context, Diags, IsAux), DiscriminatorOverride(DiscriminatorOverride) {} /// @name Mangler Entry Points @@ -150,11 +99,10 @@ public: } void mangleCXXName(GlobalDecl GD, raw_ostream &) override; - void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk, + void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk, bool, raw_ostream &) override; void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type, - const ThisAdjustment &ThisAdjustment, - raw_ostream &) override; + const ThunkInfo &Thunk, bool, raw_ostream &) override; void mangleReferenceTemporary(const VarDecl *D, unsigned ManglingNumber, raw_ostream &) override; void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) override; @@ -162,8 +110,10 @@ public: void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset, const CXXRecordDecl *Type, raw_ostream &) override; void mangleCXXRTTI(QualType T, raw_ostream &) override; - void mangleCXXRTTIName(QualType T, raw_ostream &) override; - void mangleTypeName(QualType T, raw_ostream &) override; + void mangleCXXRTTIName(QualType T, raw_ostream &, + bool NormalizeIntegers) override; + void 
mangleCanonicalTypeName(QualType T, raw_ostream &, + bool NormalizeIntegers) override; void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override; void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override; @@ -172,9 +122,9 @@ public: void mangleDynamicAtExitDestructor(const VarDecl *D, raw_ostream &Out) override; void mangleDynamicStermFinalizer(const VarDecl *D, raw_ostream &Out) override; - void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl, + void mangleSEHFilterExpression(GlobalDecl EnclosingDecl, raw_ostream &Out) override; - void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl, + void mangleSEHFinallyBlock(GlobalDecl EnclosingDecl, raw_ostream &Out) override; void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &) override; void mangleItaniumThreadLocalWrapper(const VarDecl *D, @@ -184,6 +134,8 @@ public: void mangleLambdaSig(const CXXRecordDecl *Lambda, raw_ostream &) override; + void mangleModuleInitializer(const Module *Module, raw_ostream &) override; + bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) { // Lambda closure types are already numbered. if (isLambda(ND)) @@ -197,7 +149,7 @@ public: // Use the canonical number for externally visible decls. if (ND->isExternallyVisible()) { - unsigned discriminator = getASTContext().getManglingNumber(ND); + unsigned discriminator = getASTContext().getManglingNumber(ND, isAux()); if (discriminator == 1) return false; disc = discriminator - 2; @@ -249,6 +201,15 @@ public: return DiscriminatorOverride; } + NamespaceDecl *getStdNamespace(); + + const DeclContext *getEffectiveDeclContext(const Decl *D); + const DeclContext *getEffectiveParentContext(const DeclContext *DC) { + return getEffectiveDeclContext(cast<Decl>(DC)); + } + + bool isInternalLinkageDecl(const NamedDecl *ND); + /// @} }; @@ -256,6 +217,10 @@ public: class CXXNameMangler { ItaniumMangleContextImpl &Context; raw_ostream &Out; + /// Normalize integer types for cross-language CFI support with other + /// languages that can't represent and encode C/C++ integer types. + bool NormalizeIntegers = false; + bool NullOut = false; /// In the "DisableDerivedAbiTags" mode derived ABI tags are not calculated. /// This mode is used when mangler creates another mangler recursively to @@ -267,18 +232,23 @@ class CXXNameMangler { /// that's not a template specialization; otherwise it's the pattern /// for that specialization. const NamedDecl *Structor; - unsigned StructorType; + unsigned StructorType = 0; + + // An offset to add to all template parameter depths while mangling. Used + // when mangling a template parameter list to see if it matches a template + // template parameter exactly. + unsigned TemplateDepthOffset = 0; /// The next substitution sequence number. - unsigned SeqID; + unsigned SeqID = 0; class FunctionTypeDepthState { - unsigned Bits; + unsigned Bits = 0; enum { InResultTypeMask = 1 }; public: - FunctionTypeDepthState() : Bits(0) {} + FunctionTypeDepthState() = default; /// The number of function types we're inside. 
unsigned getDepth() const { @@ -427,35 +397,58 @@ class CXXNameMangler { ASTContext &getASTContext() const { return Context.getASTContext(); } + bool isCompatibleWith(LangOptions::ClangABI Ver) { + return Context.getASTContext().getLangOpts().getClangABICompat() <= Ver; + } + + bool isStd(const NamespaceDecl *NS); + bool isStdNamespace(const DeclContext *DC); + + const RecordDecl *GetLocalClassDecl(const Decl *D); + bool isSpecializedAs(QualType S, llvm::StringRef Name, QualType A); + bool isStdCharSpecialization(const ClassTemplateSpecializationDecl *SD, + llvm::StringRef Name, bool HasAllocator); + public: CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_, const NamedDecl *D = nullptr, bool NullOut_ = false) - : Context(C), Out(Out_), NullOut(NullOut_), Structor(getStructor(D)), - StructorType(0), SeqID(0), AbiTagsRoot(AbiTags) { + : Context(C), Out(Out_), NullOut(NullOut_), Structor(getStructor(D)), + AbiTagsRoot(AbiTags) { // These can't be mangled without a ctor type or dtor type. assert(!D || (!isa<CXXDestructorDecl>(D) && !isa<CXXConstructorDecl>(D))); } CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_, const CXXConstructorDecl *D, CXXCtorType Type) - : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), - SeqID(0), AbiTagsRoot(AbiTags) { } + : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), + AbiTagsRoot(AbiTags) {} CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_, const CXXDestructorDecl *D, CXXDtorType Type) - : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), - SeqID(0), AbiTagsRoot(AbiTags) { } + : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), + AbiTagsRoot(AbiTags) {} + CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_, + bool NormalizeIntegers_) + : Context(C), Out(Out_), NormalizeIntegers(NormalizeIntegers_), + NullOut(false), Structor(nullptr), AbiTagsRoot(AbiTags) {} CXXNameMangler(CXXNameMangler &Outer, raw_ostream &Out_) - : Context(Outer.Context), Out(Out_), NullOut(false), - Structor(Outer.Structor), StructorType(Outer.StructorType), - SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth), - AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {} + : Context(Outer.Context), Out(Out_), Structor(Outer.Structor), + StructorType(Outer.StructorType), SeqID(Outer.SeqID), + FunctionTypeDepth(Outer.FunctionTypeDepth), AbiTagsRoot(AbiTags), + Substitutions(Outer.Substitutions), + ModuleSubstitutions(Outer.ModuleSubstitutions) {} CXXNameMangler(CXXNameMangler &Outer, llvm::raw_null_ostream &Out_) - : Context(Outer.Context), Out(Out_), NullOut(true), - Structor(Outer.Structor), StructorType(Outer.StructorType), - SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth), - AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {} + : CXXNameMangler(Outer, (raw_ostream &)Out_) { + NullOut = true; + } + + struct WithTemplateDepthOffset { unsigned Offset; }; + CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out, + WithTemplateDepthOffset Offset) + : CXXNameMangler(C, Out) { + TemplateDepthOffset = Offset.Offset; + } raw_ostream &getStream() { return Out; } @@ -473,10 +466,13 @@ public: void mangleType(QualType T); void mangleNameOrStandardSubstitution(const NamedDecl *ND); void mangleLambdaSig(const CXXRecordDecl *Lambda); + void mangleModuleNamePrefix(StringRef Name, bool IsPartition = false); + void mangleVendorQualifier(StringRef Name); private: bool mangleSubstitution(const NamedDecl *ND); + bool 
mangleSubstitution(NestedNameSpecifier *NNS); bool mangleSubstitution(QualType T); bool mangleSubstitution(TemplateName Template); bool mangleSubstitution(uintptr_t Ptr); @@ -490,6 +486,11 @@ private: addSubstitution(reinterpret_cast<uintptr_t>(ND)); } + void addSubstitution(NestedNameSpecifier *NNS) { + NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS); + + addSubstitution(reinterpret_cast<uintptr_t>(NNS)); + } void addSubstitution(QualType T); void addSubstitution(TemplateName Template); void addSubstitution(uintptr_t Ptr); @@ -508,22 +509,20 @@ private: void mangleNameWithAbiTags(GlobalDecl GD, const AbiTagList *AdditionalAbiTags); - void mangleModuleName(const Module *M); - void mangleModuleNamePrefix(StringRef Name); + void mangleModuleName(const NamedDecl *ND); void mangleTemplateName(const TemplateDecl *TD, - const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs); - void mangleUnqualifiedName(GlobalDecl GD, + ArrayRef<TemplateArgument> Args); + void mangleUnqualifiedName(GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags) { - mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), UnknownArity, - AdditionalAbiTags); + mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), DC, + UnknownArity, AdditionalAbiTags); } void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name, - unsigned KnownArity, + const DeclContext *DC, unsigned KnownArity, const AbiTagList *AdditionalAbiTags); - void mangleUnscopedName(GlobalDecl GD, + void mangleUnscopedName(GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags); - void mangleUnscopedTemplateName(GlobalDecl GD, + void mangleUnscopedTemplateName(GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags); void mangleSourceName(const IdentifierInfo *II); void mangleRegCallName(const IdentifierInfo *II); @@ -535,13 +534,17 @@ private: void mangleBlockForPrefix(const BlockDecl *Block); void mangleUnqualifiedBlock(const BlockDecl *Block); void mangleTemplateParamDecl(const NamedDecl *Decl); + void mangleTemplateParameterList(const TemplateParameterList *Params); + void mangleTypeConstraint(const ConceptDecl *Concept, + ArrayRef<TemplateArgument> Arguments); + void mangleTypeConstraint(const TypeConstraint *Constraint); + void mangleRequiresClause(const Expr *RequiresClause); void mangleLambda(const CXXRecordDecl *Lambda); void mangleNestedName(GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags, bool NoFunction=false); void mangleNestedName(const TemplateDecl *TD, - const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs); + ArrayRef<TemplateArgument> Args); void mangleNestedNameWithClosurePrefix(GlobalDecl GD, const NamedDecl *PrefixND, const AbiTagList *AdditionalAbiTags); @@ -556,7 +559,6 @@ private: StringRef Prefix = ""); void mangleOperatorName(DeclarationName Name, unsigned Arity); void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity); - void mangleVendorQualifier(StringRef qualifier); void mangleQualifiers(Qualifiers Quals, const DependentAddressSpaceType *DAST = nullptr); void mangleRefQualifier(RefQualifierKind RefQualifier); @@ -581,6 +583,8 @@ private: void mangleAArch64NeonVectorType(const DependentVectorType *T); void mangleAArch64FixedSveVectorType(const VectorType *T); void mangleAArch64FixedSveVectorType(const DependentVectorType *T); + void mangleRISCVFixedRVVVectorType(const VectorType *T); + void mangleRISCVFixedRVVVectorType(const DependentVectorType *T); void 
mangleIntegerLiteral(QualType T, const llvm::APSInt &Value); void mangleFloatLiteral(QualType T, const llvm::APFloat &V); @@ -597,17 +601,21 @@ private: unsigned knownArity); void mangleCastExpression(const Expr *E, StringRef CastEncoding); void mangleInitListElements(const InitListExpr *InitList); + void mangleRequirement(SourceLocation RequiresExprLoc, + const concepts::Requirement *Req); void mangleExpression(const Expr *E, unsigned Arity = UnknownArity, bool AsTemplateArg = false); void mangleCXXCtorType(CXXCtorType T, const CXXRecordDecl *InheritedFrom); void mangleCXXDtorType(CXXDtorType T); + struct TemplateArgManglingInfo; void mangleTemplateArgs(TemplateName TN, const TemplateArgumentLoc *TemplateArgs, unsigned NumTemplateArgs); - void mangleTemplateArgs(TemplateName TN, const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs); + void mangleTemplateArgs(TemplateName TN, ArrayRef<TemplateArgument> Args); void mangleTemplateArgs(TemplateName TN, const TemplateArgumentList &AL); + void mangleTemplateArg(TemplateArgManglingInfo &Info, unsigned Index, + TemplateArgument A); void mangleTemplateArg(TemplateArgument A, bool NeedExactType); void mangleTemplateArgExpr(const Expr *E); void mangleValueInTemplateArg(QualType T, const APValue &V, bool TopLevel, @@ -628,8 +636,79 @@ private: } -static bool isInternalLinkageDecl(const NamedDecl *ND) { - if (ND && ND->getFormalLinkage() == InternalLinkage && +NamespaceDecl *ItaniumMangleContextImpl::getStdNamespace() { + if (!StdNamespace) { + StdNamespace = NamespaceDecl::Create( + getASTContext(), getASTContext().getTranslationUnitDecl(), + /*Inline=*/false, SourceLocation(), SourceLocation(), + &getASTContext().Idents.get("std"), + /*PrevDecl=*/nullptr, /*Nested=*/false); + StdNamespace->setImplicit(); + } + return StdNamespace; +} + +/// Retrieve the declaration context that should be used when mangling the given +/// declaration. +const DeclContext * +ItaniumMangleContextImpl::getEffectiveDeclContext(const Decl *D) { + // The ABI assumes that lambda closure types that occur within + // default arguments live in the context of the function. However, due to + // the way in which Clang parses and creates function declarations, this is + // not the case: the lambda closure type ends up living in the context + // where the function itself resides, because the function declaration itself + // had not yet been created. Fix the context here. + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { + if (RD->isLambda()) + if (ParmVarDecl *ContextParam = + dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl())) + return ContextParam->getDeclContext(); + } + + // Perform the same check for block literals. + if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { + if (ParmVarDecl *ContextParam = + dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl())) + return ContextParam->getDeclContext(); + } + + // On ARM and AArch64, the va_list tag is always mangled as if in the std + // namespace. We do not represent va_list as actually being in the std + // namespace in C because this would result in incorrect debug info in C, + // among other things. It is important for both languages to have the same + // mangling in order for -fsanitize=cfi-icall to work. 
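A hedged illustration of the rule described above (assuming an AArch64 target; the function name is made up): because the AAPCS64 va_list is a struct named __va_list that is mangled as if it were a member of namespace std, C and C++ callers agree on the parameter type under -fsanitize=cfi-icall.

#include <cstdarg>
void log_all(const char *fmt, va_list ap); // mangles as _Z7log_allPKcSt9__va_list
// "St" is the ::std:: abbreviation and "9__va_list" the va_list tag's source name.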
+ if (D == getASTContext().getVaListTagDecl()) { + const llvm::Triple &T = getASTContext().getTargetInfo().getTriple(); + if (T.isARM() || T.isThumb() || T.isAArch64()) + return getStdNamespace(); + } + + const DeclContext *DC = D->getDeclContext(); + if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC) || + isa<OMPDeclareMapperDecl>(DC)) { + return getEffectiveDeclContext(cast<Decl>(DC)); + } + + if (const auto *VD = dyn_cast<VarDecl>(D)) + if (VD->isExternC()) + return getASTContext().getTranslationUnitDecl(); + + if (const auto *FD = dyn_cast<FunctionDecl>(D)) { + if (FD->isExternC()) + return getASTContext().getTranslationUnitDecl(); + // Member-like constrained friends are mangled as if they were members of + // the enclosing class. + if (FD->isMemberLikeConstrainedFriend() && + getASTContext().getLangOpts().getClangABICompat() > + LangOptions::ClangABI::Ver17) + return D->getLexicalDeclContext()->getRedeclContext(); + } + + return DC->getRedeclContext(); +} + +bool ItaniumMangleContextImpl::isInternalLinkageDecl(const NamedDecl *ND) { + if (ND && ND->getFormalLinkage() == Linkage::Internal && !ND->isExternallyVisible() && getEffectiveDeclContext(ND)->isFileContext() && !ND->isInAnonymousNamespace()) @@ -659,8 +738,7 @@ bool ItaniumMangleContextImpl::isUniqueInternalLinkageDecl( } bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) { - const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); - if (FD) { + if (const auto *FD = dyn_cast<FunctionDecl>(D)) { LanguageLinkage L = FD->getLanguageLinkage(); // Overloadable functions need mangling. if (FD->hasAttr<OverloadableAttr>()) @@ -696,21 +774,26 @@ bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) { if (!getASTContext().getLangOpts().CPlusPlus) return false; - const VarDecl *VD = dyn_cast<VarDecl>(D); - if (VD && !isa<DecompositionDecl>(D)) { + if (const auto *VD = dyn_cast<VarDecl>(D)) { + // Decompositions are mangled. + if (isa<DecompositionDecl>(VD)) + return true; + // C variables are not mangled. if (VD->isExternC()) return false; - // Variables at global scope with non-internal linkage are not mangled + // Variables at global scope are not mangled unless they have internal + // linkage or are specializations or are attached to a named module. const DeclContext *DC = getEffectiveDeclContext(D); // Check for extern variable declared locally. if (DC->isFunctionOrMethod() && D->hasLinkage()) - while (!DC->isNamespace() && !DC->isTranslationUnit()) + while (!DC->isFileContext()) DC = getEffectiveParentContext(DC); - if (DC->isTranslationUnit() && D->getFormalLinkage() != InternalLinkage && + if (DC->isTranslationUnit() && D->getFormalLinkage() != Linkage::Internal && !CXXNameMangler::shouldHaveAbiTags(*this, VD) && - !isa<VarTemplateSpecializationDecl>(D)) + !isa<VarTemplateSpecializationDecl>(VD) && + !VD->getOwningModuleForLinkage()) return false; } @@ -758,8 +841,17 @@ void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) { AbiTagList ReturnTypeAbiTags = makeFunctionReturnTypeTags(FD); if (ReturnTypeAbiTags.empty()) { - // There are no tags for return type, the simplest case. + // There are no tags for return type, the simplest case. Enter the function + // parameter scope before mangling the name, because a template using + // constrained `auto` can have references to its parameters within its + // template argument list: + // + // template<typename T> void f(T x, C<decltype(x)> auto) + // ... is mangled as ... 
+ // template<typename T, C<decltype(param 1)> U> void f(T, U) + FunctionTypeDepthState Saved = FunctionTypeDepth.push(); mangleName(GD); + FunctionTypeDepth.pop(Saved); mangleFunctionEncodingBareType(FD); return; } @@ -772,7 +864,10 @@ void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) { CXXNameMangler FunctionEncodingMangler(*this, FunctionEncodingStream); // Output name of the function. FunctionEncodingMangler.disableDerivedAbiTags(); + + FunctionTypeDepthState Saved = FunctionTypeDepth.push(); FunctionEncodingMangler.mangleNameWithAbiTags(FD, nullptr); + FunctionTypeDepth.pop(Saved); // Remember length of the function name in the buffer. size_t EncodingPositionStart = FunctionEncodingStream.str().size(); @@ -790,7 +885,9 @@ void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) { AdditionalAbiTags.end()); // Output name with implicit tags and function encoding from temporary buffer. + Saved = FunctionTypeDepth.push(); mangleNameWithAbiTags(FD, &AdditionalAbiTags); + FunctionTypeDepth.pop(Saved); Out << FunctionEncodingStream.str().substr(EncodingPositionStart); // Function encoding could create new substitutions so we have to add @@ -808,16 +905,15 @@ void CXXNameMangler::mangleFunctionEncodingBareType(const FunctionDecl *FD) { EnableIfAttr *EIA = dyn_cast<EnableIfAttr>(*I); if (!EIA) continue; - if (Context.getASTContext().getLangOpts().getClangABICompat() > - LangOptions::ClangABI::Ver11) { - mangleTemplateArgExpr(EIA->getCond()); - } else { + if (isCompatibleWith(LangOptions::ClangABI::Ver11)) { // Prior to Clang 12, we hardcoded the X/E around enable-if's argument, // even though <template-arg> should not include an X/E around // <expr-primary>. Out << 'X'; mangleExpression(EIA->getCond()); Out << 'E'; + } else { + mangleTemplateArgExpr(EIA->getCond()); } } Out << 'E'; @@ -860,27 +956,18 @@ void CXXNameMangler::mangleFunctionEncodingBareType(const FunctionDecl *FD) { MangleReturnType, FD); } -static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) { - while (isa<LinkageSpecDecl>(DC)) { - DC = getEffectiveParentContext(DC); - } - - return DC; -} - /// Return whether a given namespace is the 'std' namespace. -static bool isStd(const NamespaceDecl *NS) { - if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS)) - ->isTranslationUnit()) +bool CXXNameMangler::isStd(const NamespaceDecl *NS) { + if (!Context.getEffectiveParentContext(NS)->isTranslationUnit()) return false; - const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier(); + const IdentifierInfo *II = NS->getFirstDecl()->getIdentifier(); return II && II->isStr("std"); } // isStdNamespace - Return whether a given decl context is a toplevel 'std' // namespace. 
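The isStd / isStdNamespace helpers only treat the top-level ::std as special, since declarations there use the abbreviated "St" prefix. A small illustration (the declarations are hypothetical; manglings follow the Itanium ABI):

namespace std { struct S; }                   // void f(std::S *)    -> _Z1fPSt1S
namespace N { namespace std { struct S; } }   // void g(N::std::S *) -> _Z1gPN1N3std1SE
// Only the first S lives in the top-level std, so only it gets the "St" form;
// the nested N::std is mangled as an ordinary nested-name.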
-static bool isStdNamespace(const DeclContext *DC) { +bool CXXNameMangler::isStdNamespace(const DeclContext *DC) { if (!DC->isNamespace()) return false; @@ -954,6 +1041,17 @@ void CXXNameMangler::mangleName(GlobalDecl GD) { } } +const RecordDecl *CXXNameMangler::GetLocalClassDecl(const Decl *D) { + const DeclContext *DC = Context.getEffectiveDeclContext(D); + while (!DC->isNamespace() && !DC->isTranslationUnit()) { + if (isLocalContainerContext(DC)) + return dyn_cast<RecordDecl>(D); + D = cast<Decl>(DC); + DC = Context.getEffectiveDeclContext(D); + } + return nullptr; +} + void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD, const AbiTagList *AdditionalAbiTags) { const NamedDecl *ND = cast<NamedDecl>(GD.getDecl()); @@ -962,34 +1060,23 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD, // ::= [<module-name>] <unscoped-template-name> <template-args> // ::= <local-name> // - const DeclContext *DC = getEffectiveDeclContext(ND); + const DeclContext *DC = Context.getEffectiveDeclContext(ND); + bool IsLambda = isLambda(ND); // If this is an extern variable declared locally, the relevant DeclContext // is that of the containing namespace, or the translation unit. // FIXME: This is a hack; extern variables declared locally should have // a proper semantic declaration context! - if (isLocalContainerContext(DC) && ND->hasLinkage() && !isLambda(ND)) + if (isLocalContainerContext(DC) && ND->hasLinkage() && !IsLambda) while (!DC->isNamespace() && !DC->isTranslationUnit()) - DC = getEffectiveParentContext(DC); - else if (GetLocalClassDecl(ND)) { + DC = Context.getEffectiveParentContext(DC); + else if (GetLocalClassDecl(ND) && + (!IsLambda || isCompatibleWith(LangOptions::ClangABI::Ver18))) { mangleLocalName(GD, AdditionalAbiTags); return; } - DC = IgnoreLinkageSpecDecls(DC); - - if (isLocalContainerContext(DC)) { - mangleLocalName(GD, AdditionalAbiTags); - return; - } - - // Do not mangle the owning module for an external linkage declaration. - // This enables backwards-compatibility with non-modular code, and is - // a valid choice since conflicts are not permitted by C++ Modules TS - // [basic.def.odr]/6.2. - if (!ND->hasExternalFormalLinkage()) - if (Module *M = ND->getOwningModuleForLinkage()) - mangleModuleName(M); + assert(!isa<LinkageSpecDecl>(DC) && "context cannot be LinkageSpecDecl"); // Closures can require a nested-name mangling even if they're semantically // in the global namespace. @@ -998,42 +1085,44 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD, return; } + if (isLocalContainerContext(DC)) { + mangleLocalName(GD, AdditionalAbiTags); + return; + } + if (DC->isTranslationUnit() || isStdNamespace(DC)) { // Check if we have a template. 
const TemplateArgumentList *TemplateArgs = nullptr; if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) { - mangleUnscopedTemplateName(TD, AdditionalAbiTags); + mangleUnscopedTemplateName(TD, DC, AdditionalAbiTags); mangleTemplateArgs(asTemplateName(TD), *TemplateArgs); return; } - mangleUnscopedName(GD, AdditionalAbiTags); + mangleUnscopedName(GD, DC, AdditionalAbiTags); return; } mangleNestedName(GD, DC, AdditionalAbiTags); } -void CXXNameMangler::mangleModuleName(const Module *M) { - // Implement the C++ Modules TS name mangling proposal; see - // https://gcc.gnu.org/wiki/cxx-modules?action=AttachFile - // - // <module-name> ::= W <unscoped-name>+ E - // ::= W <module-subst> <unscoped-name>* E - Out << 'W'; - mangleModuleNamePrefix(M->Name); - Out << 'E'; +void CXXNameMangler::mangleModuleName(const NamedDecl *ND) { + if (ND->isExternallyVisible()) + if (Module *M = ND->getOwningModuleForLinkage()) + mangleModuleNamePrefix(M->getPrimaryModuleInterfaceName()); } -void CXXNameMangler::mangleModuleNamePrefix(StringRef Name) { - // <module-subst> ::= _ <seq-id> # 0 < seq-id < 10 - // ::= W <seq-id - 10> _ # otherwise +// <module-name> ::= <module-subname> +// ::= <module-name> <module-subname> +// ::= <substitution> +// <module-subname> ::= W <source-name> +// ::= W P <source-name> +void CXXNameMangler::mangleModuleNamePrefix(StringRef Name, bool IsPartition) { + // <substitution> ::= S <seq-id> _ auto It = ModuleSubstitutions.find(Name); if (It != ModuleSubstitutions.end()) { - if (It->second < 10) - Out << '_' << static_cast<char>('0' + It->second); - else - Out << 'W' << (It->second - 10) << '_'; + Out << 'S'; + mangleSeqID(It->second); return; } @@ -1042,40 +1131,44 @@ void CXXNameMangler::mangleModuleNamePrefix(StringRef Name) { auto Parts = Name.rsplit('.'); if (Parts.second.empty()) Parts.second = Parts.first; - else - mangleModuleNamePrefix(Parts.first); + else { + mangleModuleNamePrefix(Parts.first, IsPartition); + IsPartition = false; + } + Out << 'W'; + if (IsPartition) + Out << 'P'; Out << Parts.second.size() << Parts.second; - ModuleSubstitutions.insert({Name, ModuleSubstitutions.size()}); + ModuleSubstitutions.insert({Name, SeqID++}); } void CXXNameMangler::mangleTemplateName(const TemplateDecl *TD, - const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs) { - const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD)); + ArrayRef<TemplateArgument> Args) { + const DeclContext *DC = Context.getEffectiveDeclContext(TD); if (DC->isTranslationUnit() || isStdNamespace(DC)) { - mangleUnscopedTemplateName(TD, nullptr); - mangleTemplateArgs(asTemplateName(TD), TemplateArgs, NumTemplateArgs); + mangleUnscopedTemplateName(TD, DC, nullptr); + mangleTemplateArgs(asTemplateName(TD), Args); } else { - mangleNestedName(TD, TemplateArgs, NumTemplateArgs); + mangleNestedName(TD, Args); } } -void CXXNameMangler::mangleUnscopedName(GlobalDecl GD, +void CXXNameMangler::mangleUnscopedName(GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags) { - const NamedDecl *ND = cast<NamedDecl>(GD.getDecl()); // <unscoped-name> ::= <unqualified-name> // ::= St <unqualified-name> # ::std:: - if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND)))) + assert(!isa<LinkageSpecDecl>(DC) && "unskipped LinkageSpecDecl"); + if (isStdNamespace(DC)) Out << "St"; - mangleUnqualifiedName(GD, AdditionalAbiTags); + mangleUnqualifiedName(GD, DC, AdditionalAbiTags); } void CXXNameMangler::mangleUnscopedTemplateName( - GlobalDecl GD, const AbiTagList 
*AdditionalAbiTags) { + GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags) { const TemplateDecl *ND = cast<TemplateDecl>(GD.getDecl()); // <unscoped-template-name> ::= <unscoped-name> // ::= <substitution> @@ -1088,9 +1181,10 @@ void CXXNameMangler::mangleUnscopedTemplateName( "template template param cannot have abi tags"); mangleTemplateParameter(TTP->getDepth(), TTP->getIndex()); } else if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND)) { - mangleUnscopedName(GD, AdditionalAbiTags); + mangleUnscopedName(GD, DC, AdditionalAbiTags); } else { - mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), AdditionalAbiTags); + mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), DC, + AdditionalAbiTags); } addSubstitution(ND); @@ -1206,8 +1300,7 @@ void CXXNameMangler::manglePrefix(QualType type) { // FIXME: GCC does not appear to mangle the template arguments when // the template in question is a dependent template name. Should we // emulate that badness? - mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(), - TST->getNumArgs()); + mangleTemplateArgs(TST->getTemplateName(), TST->template_arguments()); addSubstitution(QualType(TST, 0)); } } else if (const auto *DTST = @@ -1220,7 +1313,7 @@ void CXXNameMangler::manglePrefix(QualType type) { // FIXME: GCC does not appear to mangle the template arguments when // the template in question is a dependent template name. Should we // emulate that badness? - mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs()); + mangleTemplateArgs(Template, DTST->template_arguments()); addSubstitution(QualType(DTST, 0)); } } else { @@ -1366,15 +1459,29 @@ void CXXNameMangler::mangleUnresolvedName( mangleTemplateArgs(TemplateName(), TemplateArgs, NumTemplateArgs); } -void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, - DeclarationName Name, - unsigned KnownArity, - const AbiTagList *AdditionalAbiTags) { +void CXXNameMangler::mangleUnqualifiedName( + GlobalDecl GD, DeclarationName Name, const DeclContext *DC, + unsigned KnownArity, const AbiTagList *AdditionalAbiTags) { const NamedDecl *ND = cast_or_null<NamedDecl>(GD.getDecl()); - unsigned Arity = KnownArity; - // <unqualified-name> ::= <operator-name> + // <unqualified-name> ::= [<module-name>] [F] <operator-name> // ::= <ctor-dtor-name> - // ::= <source-name> + // ::= [<module-name>] [F] <source-name> + // ::= [<module-name>] DC <source-name>* E + + if (ND && DC && DC->isFileContext()) + mangleModuleName(ND); + + // A member-like constrained friend is mangled with a leading 'F'. + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24. + auto *FD = dyn_cast<FunctionDecl>(ND); + auto *FTD = dyn_cast<FunctionTemplateDecl>(ND); + if ((FD && FD->isMemberLikeConstrainedFriend()) || + (FTD && FTD->getTemplatedDecl()->isMemberLikeConstrainedFriend())) { + if (!isCompatibleWith(LangOptions::ClangABI::Ver17)) + Out << 'F'; + } + + unsigned Arity = KnownArity; switch (Name.getNameKind()) { case DeclarationName::Identifier: { const IdentifierInfo *II = Name.getAsIdentifierInfo(); @@ -1385,8 +1492,6 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, // // <unqualified-name> ::= DC <source-name>* E // - // These can never be referenced across translation units, so we do - // not need a cross-vendor mangling for anything other than demanglers. 
// Proposed on cxx-abi-dev on 2016-08-12 Out << "DC"; for (auto *BD : DD->bindings()) @@ -1428,10 +1533,9 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, // 12_GLOBAL__N_1 mangling is quite sufficient there, and this better // matches GCC anyway, because GCC does not treat anonymous namespaces as // implying internal linkage. - if (isInternalLinkageDecl(ND)) + if (Context.isInternalLinkageDecl(ND)) Out << 'L'; - auto *FD = dyn_cast<FunctionDecl>(ND); bool IsRegCall = FD && FD->getType()->castAs<FunctionType>()->getCallConv() == clang::CC_X86RegCall; @@ -1518,9 +1622,16 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, // <lambda-sig> ::= <template-param-decl>* <parameter-type>+ // # Parameter types or 'v' for 'void'. if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) { - if (Record->isLambda() && (Record->getLambdaManglingNumber() || - Context.getDiscriminatorOverride()( - Context.getASTContext(), Record))) { + std::optional<unsigned> DeviceNumber = + Context.getDiscriminatorOverride()(Context.getASTContext(), Record); + + // If we have a device-number via the discriminator, use that to mangle + // the lambda, otherwise use the typical lambda-mangling-number. In either + // case, a '0' should be mangled as a normal unnamed class instead of as a + // lambda. + if (Record->isLambda() && + ((DeviceNumber && *DeviceNumber > 0) || + (!DeviceNumber && Record->getLambdaManglingNumber() > 0))) { assert(!AdditionalAbiTags && "Lambda type cannot have additional abi tags"); mangleLambda(Record); @@ -1529,7 +1640,8 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, } if (TD->isExternallyVisible()) { - unsigned UnnamedMangle = getASTContext().getManglingNumber(TD); + unsigned UnnamedMangle = + getASTContext().getManglingNumber(TD, Context.isAux()); Out << "Ut"; if (UnnamedMangle > 1) Out << UnnamedMangle - 2; @@ -1540,7 +1652,9 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, // Get a unique id for the anonymous struct. If it is not a real output // ID doesn't matter so use fake one. - unsigned AnonStructId = NullOut ? 0 : Context.getAnonymousStructId(TD); + unsigned AnonStructId = + NullOut ? 0 + : Context.getAnonymousStructId(TD, dyn_cast<FunctionDecl>(DC)); // Mangle it as a source name in the form // [n] $_<id> @@ -1599,6 +1713,7 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, // Otherwise, use the complete destructor name. This is relevant if a // class with a destructor is declared within a destructor. mangleCXXDtorType(Dtor_Complete); + assert(ND); writeAbiTags(ND, AdditionalAbiTags); break; @@ -1608,10 +1723,10 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, // If we have a member function, we need to include the 'this' pointer. 
if (const auto *MD = dyn_cast<CXXMethodDecl>(ND)) - if (!MD->isStatic()) + if (MD->isImplicitObjectMemberFunction()) Arity++; } - LLVM_FALLTHROUGH; + [[fallthrough]]; case DeclarationName::CXXConversionFunctionName: case DeclarationName::CXXLiteralOperatorName: mangleOperatorName(Name, Arity); @@ -1630,8 +1745,12 @@ void CXXNameMangler::mangleRegCallName(const IdentifierInfo *II) { // <source-name> ::= <positive length number> __regcall3__ <identifier> // <number> ::= [n] <non-negative decimal integer> // <identifier> ::= <unqualified source code identifier> - Out << II->getLength() + sizeof("__regcall3__") - 1 << "__regcall3__" - << II->getName(); + if (getASTContext().getLangOpts().RegCall4) + Out << II->getLength() + sizeof("__regcall4__") - 1 << "__regcall4__" + << II->getName(); + else + Out << II->getLength() + sizeof("__regcall3__") - 1 << "__regcall3__" + << II->getName(); } void CXXNameMangler::mangleDeviceStubName(const IdentifierInfo *II) { @@ -1664,6 +1783,8 @@ void CXXNameMangler::mangleNestedName(GlobalDecl GD, Qualifiers MethodQuals = Method->getMethodQualifiers(); // We do not consider restrict a distinguishing attribute for overloading // purposes so we must not mangle it. + if (Method->isExplicitObjectMemberFunction()) + Out << 'H'; MethodQuals.removeRestrict(); mangleQualifiers(MethodQuals); mangleRefQualifier(Method->getRefQualifier()); @@ -1676,20 +1797,19 @@ void CXXNameMangler::mangleNestedName(GlobalDecl GD, mangleTemplateArgs(asTemplateName(TD), *TemplateArgs); } else { manglePrefix(DC, NoFunction); - mangleUnqualifiedName(GD, AdditionalAbiTags); + mangleUnqualifiedName(GD, DC, AdditionalAbiTags); } Out << 'E'; } void CXXNameMangler::mangleNestedName(const TemplateDecl *TD, - const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs) { + ArrayRef<TemplateArgument> Args) { // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E Out << 'N'; mangleTemplatePrefix(TD); - mangleTemplateArgs(asTemplateName(TD), TemplateArgs, NumTemplateArgs); + mangleTemplateArgs(asTemplateName(TD), Args); Out << 'E'; } @@ -1706,7 +1826,7 @@ void CXXNameMangler::mangleNestedNameWithClosurePrefix( Out << 'N'; mangleClosurePrefix(PrefixND); - mangleUnqualifiedName(GD, AdditionalAbiTags); + mangleUnqualifiedName(GD, nullptr, AdditionalAbiTags); Out << 'E'; } @@ -1736,7 +1856,7 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD, // <discriminator> := _ <non-negative number> assert(isa<NamedDecl>(D) || isa<BlockDecl>(D)); const RecordDecl *RD = GetLocalClassDecl(D); - const DeclContext *DC = getEffectiveDeclContext(RD ? RD : D); + const DeclContext *DC = Context.getEffectiveDeclContext(RD ? RD : D); Out << 'Z'; @@ -1784,18 +1904,18 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD, // Mangle the name relative to the closest enclosing function. 
// equality ok because RD derived from ND above if (D == RD) { - mangleUnqualifiedName(RD, AdditionalAbiTags); + mangleUnqualifiedName(RD, DC, AdditionalAbiTags); } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { if (const NamedDecl *PrefixND = getClosurePrefix(BD)) mangleClosurePrefix(PrefixND, true /*NoFunction*/); else - manglePrefix(getEffectiveDeclContext(BD), true /*NoFunction*/); + manglePrefix(Context.getEffectiveDeclContext(BD), true /*NoFunction*/); assert(!AdditionalAbiTags && "Block cannot have additional abi tags"); mangleUnqualifiedBlock(BD); } else { const NamedDecl *ND = cast<NamedDecl>(D); - mangleNestedName(GD, getEffectiveDeclContext(ND), AdditionalAbiTags, - true /*NoFunction*/); + mangleNestedName(GD, Context.getEffectiveDeclContext(ND), + AdditionalAbiTags, true /*NoFunction*/); } } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { // Mangle a block in a default parameter; see above explanation for @@ -1815,7 +1935,7 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD, assert(!AdditionalAbiTags && "Block cannot have additional abi tags"); mangleUnqualifiedBlock(BD); } else { - mangleUnqualifiedName(GD, AdditionalAbiTags); + mangleUnqualifiedName(GD, DC, AdditionalAbiTags); } if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? RD : D)) { @@ -1834,7 +1954,7 @@ void CXXNameMangler::mangleBlockForPrefix(const BlockDecl *Block) { mangleLocalName(Block, /* AdditionalAbiTags */ nullptr); return; } - const DeclContext *DC = getEffectiveDeclContext(Block); + const DeclContext *DC = Context.getEffectiveDeclContext(Block); if (isLocalContainerContext(DC)) { mangleLocalName(Block, /* AdditionalAbiTags */ nullptr); return; @@ -1850,8 +1970,7 @@ void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) { // When trying to be ABI-compatibility with clang 12 and before, mangle a // <data-member-prefix> now, with no substitutions and no <template-args>. if (Decl *Context = Block->getBlockManglingContextDecl()) { - if (getASTContext().getLangOpts().getClangABICompat() <= - LangOptions::ClangABI::Ver12 && + if (isCompatibleWith(LangOptions::ClangABI::Ver12) && (isa<VarDecl>(Context) || isa<FieldDecl>(Context)) && Context->getDeclContext()->isRecord()) { const auto *ND = cast<NamedDecl>(Context); @@ -1879,15 +1998,25 @@ void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) { } // <template-param-decl> -// ::= Ty # template type parameter -// ::= Tn <type> # template non-type parameter -// ::= Tt <template-param-decl>* E # template template parameter -// ::= Tp <template-param-decl> # template parameter pack +// ::= Ty # template type parameter +// ::= Tk <concept name> [<template-args>] # constrained type parameter +// ::= Tn <type> # template non-type parameter +// ::= Tt <template-param-decl>* E [Q <requires-clause expr>] +// # template template parameter +// ::= Tp <template-param-decl> # template parameter pack void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) { + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/47. if (auto *Ty = dyn_cast<TemplateTypeParmDecl>(Decl)) { if (Ty->isParameterPack()) Out << "Tp"; - Out << "Ty"; + const TypeConstraint *Constraint = Ty->getTypeConstraint(); + if (Constraint && !isCompatibleWith(LangOptions::ClangABI::Ver17)) { + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24. 
+ Out << "Tk"; + mangleTypeConstraint(Constraint); + } else { + Out << "Ty"; + } } else if (auto *Tn = dyn_cast<NonTypeTemplateParmDecl>(Decl)) { if (Tn->isExpandedParameterPack()) { for (unsigned I = 0, N = Tn->getNumExpansionTypes(); I != N; ++I) { @@ -1907,29 +2036,59 @@ void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) { } else if (auto *Tt = dyn_cast<TemplateTemplateParmDecl>(Decl)) { if (Tt->isExpandedParameterPack()) { for (unsigned I = 0, N = Tt->getNumExpansionTemplateParameters(); I != N; - ++I) { - Out << "Tt"; - for (auto *Param : *Tt->getExpansionTemplateParameters(I)) - mangleTemplateParamDecl(Param); - Out << "E"; - } + ++I) + mangleTemplateParameterList(Tt->getExpansionTemplateParameters(I)); } else { if (Tt->isParameterPack()) Out << "Tp"; - Out << "Tt"; - for (auto *Param : *Tt->getTemplateParameters()) - mangleTemplateParamDecl(Param); - Out << "E"; + mangleTemplateParameterList(Tt->getTemplateParameters()); } } } +void CXXNameMangler::mangleTemplateParameterList( + const TemplateParameterList *Params) { + Out << "Tt"; + for (auto *Param : *Params) + mangleTemplateParamDecl(Param); + mangleRequiresClause(Params->getRequiresClause()); + Out << "E"; +} + +void CXXNameMangler::mangleTypeConstraint( + const ConceptDecl *Concept, ArrayRef<TemplateArgument> Arguments) { + const DeclContext *DC = Context.getEffectiveDeclContext(Concept); + if (!Arguments.empty()) + mangleTemplateName(Concept, Arguments); + else if (DC->isTranslationUnit() || isStdNamespace(DC)) + mangleUnscopedName(Concept, DC, nullptr); + else + mangleNestedName(Concept, DC, nullptr); +} + +void CXXNameMangler::mangleTypeConstraint(const TypeConstraint *Constraint) { + llvm::SmallVector<TemplateArgument, 8> Args; + if (Constraint->getTemplateArgsAsWritten()) { + for (const TemplateArgumentLoc &ArgLoc : + Constraint->getTemplateArgsAsWritten()->arguments()) + Args.push_back(ArgLoc.getArgument()); + } + return mangleTypeConstraint(Constraint->getNamedConcept(), Args); +} + +void CXXNameMangler::mangleRequiresClause(const Expr *RequiresClause) { + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24. + if (RequiresClause && !isCompatibleWith(LangOptions::ClangABI::Ver17)) { + Out << 'Q'; + mangleExpression(RequiresClause); + } +} + void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) { // When trying to be ABI-compatibility with clang 12 and before, mangle a // <data-member-prefix> now, with no substitutions. if (Decl *Context = Lambda->getLambdaContextDecl()) { - if (getASTContext().getLangOpts().getClangABICompat() <= - LangOptions::ClangABI::Ver12 && + if (isCompatibleWith(LangOptions::ClangABI::Ver12) && (isa<VarDecl>(Context) || isa<FieldDecl>(Context)) && !isa<ParmVarDecl>(Context)) { if (const IdentifierInfo *Name @@ -1958,10 +2117,10 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) { // if the host-side CXX ABI has different numbering for lambda. In such case, // if the mangle context is that device-side one, use the device-side lambda // mangling number for this lambda. - llvm::Optional<unsigned> DeviceNumber = + std::optional<unsigned> DeviceNumber = Context.getDiscriminatorOverride()(Context.getASTContext(), Lambda); - unsigned Number = DeviceNumber.hasValue() ? *DeviceNumber - : Lambda->getLambdaManglingNumber(); + unsigned Number = + DeviceNumber ? 
*DeviceNumber : Lambda->getLambdaManglingNumber(); assert(Number > 0 && "Lambda should be mangled as an unnamed class"); if (Number > 1) @@ -1970,8 +2129,14 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) { } void CXXNameMangler::mangleLambdaSig(const CXXRecordDecl *Lambda) { + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/31. for (auto *D : Lambda->getLambdaExplicitTemplateParameters()) mangleTemplateParamDecl(D); + + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24. + if (auto *TPL = Lambda->getGenericLambdaTemplateParameterList()) + mangleRequiresClause(TPL->getRequiresClause()); + auto *Proto = Lambda->getLambdaTypeInfo()->getType()->castAs<FunctionProtoType>(); mangleBareFunctionType(Proto, /*MangleReturnType=*/false, @@ -2001,12 +2166,20 @@ void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) { return; case NestedNameSpecifier::Identifier: + // Clang 14 and before did not consider this substitutable. + bool Clang14Compat = isCompatibleWith(LangOptions::ClangABI::Ver14); + if (!Clang14Compat && mangleSubstitution(qualifier)) + return; + // Member expressions can have these without prefixes, but that // should end up in mangleUnresolvedPrefix instead. assert(qualifier->getPrefix()); manglePrefix(qualifier->getPrefix()); mangleSourceName(qualifier->getAsIdentifier()); + + if (!Clang14Compat) + addSubstitution(qualifier); return; } @@ -2021,7 +2194,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) { // ::= # empty // ::= <substitution> - DC = IgnoreLinkageSpecDecls(DC); + assert(!isa<LinkageSpecDecl>(DC) && "prefix cannot be LinkageSpecDecl"); if (DC->isTranslationUnit()) return; @@ -2029,8 +2202,6 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) { if (NoFunction && isLocalContainerContext(DC)) return; - assert(!isLocalContainerContext(DC)); - const NamedDecl *ND = cast<NamedDecl>(DC); if (mangleSubstitution(ND)) return; @@ -2042,10 +2213,11 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) { mangleTemplateArgs(asTemplateName(TD), *TemplateArgs); } else if (const NamedDecl *PrefixND = getClosurePrefix(ND)) { mangleClosurePrefix(PrefixND, NoFunction); - mangleUnqualifiedName(ND, nullptr); + mangleUnqualifiedName(ND, nullptr, nullptr); } else { - manglePrefix(getEffectiveDeclContext(ND), NoFunction); - mangleUnqualifiedName(ND, nullptr); + const DeclContext *DC = Context.getEffectiveDeclContext(ND); + manglePrefix(DC, NoFunction); + mangleUnqualifiedName(ND, DC, nullptr); } addSubstitution(ND); @@ -2063,8 +2235,7 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) { // Clang 11 and before mangled the substitution for a dependent template name // after already having emitted (a substitution for) the prefix. 
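For context on the substitution machinery these prefix changes feed into, a standard Itanium ABI illustration (not specific to this change): each emitted component gets a sequence id that later occurrences refer back to.

struct A;
void f(A *, A *);   // _Z1fP1AS0_  ("1A" becomes S_, "P1A" becomes S0_)
void g(A *, A);     // _Z1gP1AS_   (the second "1A" reuses S_)
// Per the hunk above, Identifier nested-name-specifiers now participate in the
// same substitution table unless Clang <= 14 ABI compatibility is requested.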
- bool Clang11Compat = getASTContext().getLangOpts().getClangABICompat() <= - LangOptions::ClangABI::Ver11; + bool Clang11Compat = isCompatibleWith(LangOptions::ClangABI::Ver11); if (!Clang11Compat && mangleSubstitution(Template)) return; @@ -2098,19 +2269,20 @@ void CXXNameMangler::mangleTemplatePrefix(GlobalDecl GD, if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND)) { mangleTemplateParameter(TTP->getDepth(), TTP->getIndex()); } else { - manglePrefix(getEffectiveDeclContext(ND), NoFunction); + const DeclContext *DC = Context.getEffectiveDeclContext(ND); + manglePrefix(DC, NoFunction); if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND)) - mangleUnqualifiedName(GD, nullptr); + mangleUnqualifiedName(GD, DC, nullptr); else - mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), nullptr); + mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), DC, + nullptr); } addSubstitution(ND); } const NamedDecl *CXXNameMangler::getClosurePrefix(const Decl *ND) { - if (getASTContext().getLangOpts().getClangABICompat() <= - LangOptions::ClangABI::Ver12) + if (isCompatibleWith(LangOptions::ClangABI::Ver12)) return nullptr; const NamedDecl *Context = nullptr; @@ -2143,8 +2315,9 @@ void CXXNameMangler::mangleClosurePrefix(const NamedDecl *ND, bool NoFunction) { mangleTemplatePrefix(TD, NoFunction); mangleTemplateArgs(asTemplateName(TD), *TemplateArgs); } else { - manglePrefix(getEffectiveDeclContext(ND), NoFunction); - mangleUnqualifiedName(ND, nullptr); + const auto *DC = Context.getEffectiveDeclContext(ND); + manglePrefix(DC, NoFunction); + mangleUnqualifiedName(ND, DC, nullptr); } Out << 'M'; @@ -2165,9 +2338,7 @@ void CXXNameMangler::mangleType(TemplateName TN) { switch (TN.getKind()) { case TemplateName::QualifiedTemplate: - TD = TN.getAsQualifiedTemplateName()->getTemplateDecl(); - goto HaveDecl; - + case TemplateName::UsingTemplate: case TemplateName::Template: TD = TN.getAsTemplateDecl(); goto HaveDecl; @@ -2226,6 +2397,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, case Type::Complex: case Type::Adjusted: case Type::Decayed: + case Type::ArrayParameter: case Type::Pointer: case Type::BlockPointer: case Type::LValueReference: @@ -2246,6 +2418,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, case Type::FunctionNoProto: case Type::Paren: case Type::Attributed: + case Type::BTFTagAttributed: case Type::Auto: case Type::DeducedTemplateSpecialization: case Type::PackExpansion: @@ -2256,8 +2429,9 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, case Type::Atomic: case Type::Pipe: case Type::MacroQualified: - case Type::ExtInt: - case Type::DependentExtInt: + case Type::BitInt: + case Type::DependentBitInt: + case Type::CountAttributed: llvm_unreachable("type is illegal as a nested name specifier"); case Type::SubstTemplateTypeParmPack: @@ -2275,6 +2449,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, case Type::TypeOfExpr: case Type::TypeOf: case Type::Decltype: + case Type::PackIndexing: case Type::TemplateTypeParm: case Type::UnaryTransform: case Type::SubstTemplateTypeParm: @@ -2343,6 +2518,12 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, Out << "_SUBSTPACK_"; break; } + case TemplateName::UsingTemplate: { + TemplateDecl *TD = TN.getAsTemplateDecl(); + assert(TD && !isa<TemplateTemplateParmDecl>(TD)); + mangleSourceNameWithAbiTags(TD); + break; + } } // Note: we don't pass in the template name here. 
We are mangling the @@ -2350,7 +2531,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, // conversions to the corresponding template parameter. // FIXME: Other compilers mangle partially-resolved template arguments in // unresolved-qualifier-levels. - mangleTemplateArgs(TemplateName(), TST->getArgs(), TST->getNumArgs()); + mangleTemplateArgs(TemplateName(), TST->template_arguments()); break; } @@ -2369,10 +2550,13 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, TemplateName Template = getASTContext().getDependentTemplateName( DTST->getQualifier(), DTST->getIdentifier()); mangleSourceName(DTST->getIdentifier()); - mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs()); + mangleTemplateArgs(Template, DTST->template_arguments()); break; } + case Type::Using: + return mangleUnresolvedTypeOrSimpleId(cast<UsingType>(Ty)->desugar(), + Prefix); case Type::Elaborated: return mangleUnresolvedTypeOrSimpleId( cast<ElaboratedType>(Ty)->getNamedType(), Prefix); @@ -2708,6 +2892,10 @@ static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty, return true; if (Ty->isOpenCLSpecificType()) return true; + // From Clang 18.0 we correctly treat SVE types as substitution candidates. + if (Ty->isSVESizelessBuiltinType() && + Ctx.getLangOpts().getClangABICompat() > LangOptions::ClangABI::Ver17) + return true; if (Ty->isBuiltinType()) return false; // Through to Clang 6.0, we accidentally treated undeduced auto types as @@ -2860,6 +3048,7 @@ void CXXNameMangler::mangleType(const BuiltinType *T) { // ::= d # double // ::= e # long double, __float80 // ::= g # __float128 + // ::= g # __ibm128 // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits) // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits) // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits) @@ -2868,8 +3057,97 @@ void CXXNameMangler::mangleType(const BuiltinType *T) { // ::= Di # char32_t // ::= Ds # char16_t // ::= Dn # std::nullptr_t (i.e., decltype(nullptr)) + // ::= [DS] DA # N1169 fixed-point [_Sat] T _Accum + // ::= [DS] DR # N1169 fixed-point [_Sat] T _Fract // ::= u <source-name> # vendor extended type + // + // <fixed-point-size> + // ::= s # short + // ::= t # unsigned short + // ::= i # plain + // ::= j # unsigned + // ::= l # long + // ::= m # unsigned long std::string type_name; + // Normalize integer types as vendor extended types: + // u<length>i<type size> + // u<length>u<type size> + if (NormalizeIntegers && T->isInteger()) { + if (T->isSignedInteger()) { + switch (getASTContext().getTypeSize(T)) { + case 8: + // Pick a representative for each integer size in the substitution + // dictionary. (Its actual defined size is not relevant.) 
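A hedged sketch of what the normalization mode above produces (assuming an LP64 target with NormalizeIntegers enabled; substitution numbering follows the usual rules):

// void f(int, unsigned long, int);
//   -> roughly _Z1fu3i32u3u64S_
// "u3i32" encodes a 32-bit signed integer, "u3u64" a 64-bit unsigned one, and
// the second 32-bit signed parameter reuses the substitution (S_) recorded for
// that size's representative builtin.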
+ if (mangleSubstitution(BuiltinType::SChar)) + break; + Out << "u2i8"; + addSubstitution(BuiltinType::SChar); + break; + case 16: + if (mangleSubstitution(BuiltinType::Short)) + break; + Out << "u3i16"; + addSubstitution(BuiltinType::Short); + break; + case 32: + if (mangleSubstitution(BuiltinType::Int)) + break; + Out << "u3i32"; + addSubstitution(BuiltinType::Int); + break; + case 64: + if (mangleSubstitution(BuiltinType::Long)) + break; + Out << "u3i64"; + addSubstitution(BuiltinType::Long); + break; + case 128: + if (mangleSubstitution(BuiltinType::Int128)) + break; + Out << "u4i128"; + addSubstitution(BuiltinType::Int128); + break; + default: + llvm_unreachable("Unknown integer size for normalization"); + } + } else { + switch (getASTContext().getTypeSize(T)) { + case 8: + if (mangleSubstitution(BuiltinType::UChar)) + break; + Out << "u2u8"; + addSubstitution(BuiltinType::UChar); + break; + case 16: + if (mangleSubstitution(BuiltinType::UShort)) + break; + Out << "u3u16"; + addSubstitution(BuiltinType::UShort); + break; + case 32: + if (mangleSubstitution(BuiltinType::UInt)) + break; + Out << "u3u32"; + addSubstitution(BuiltinType::UInt); + break; + case 64: + if (mangleSubstitution(BuiltinType::ULong)) + break; + Out << "u3u64"; + addSubstitution(BuiltinType::ULong); + break; + case 128: + if (mangleSubstitution(BuiltinType::UInt128)) + break; + Out << "u4u128"; + addSubstitution(BuiltinType::UInt128); + break; + default: + llvm_unreachable("Unknown integer size for normalization"); + } + } + return; + } switch (T->getKind()) { case BuiltinType::Void: Out << 'v'; @@ -2934,30 +3212,77 @@ void CXXNameMangler::mangleType(const BuiltinType *T) { Out << "DF16_"; break; case BuiltinType::ShortAccum: + Out << "DAs"; + break; case BuiltinType::Accum: + Out << "DAi"; + break; case BuiltinType::LongAccum: + Out << "DAl"; + break; case BuiltinType::UShortAccum: + Out << "DAt"; + break; case BuiltinType::UAccum: + Out << "DAj"; + break; case BuiltinType::ULongAccum: + Out << "DAm"; + break; case BuiltinType::ShortFract: + Out << "DRs"; + break; case BuiltinType::Fract: + Out << "DRi"; + break; case BuiltinType::LongFract: + Out << "DRl"; + break; case BuiltinType::UShortFract: + Out << "DRt"; + break; case BuiltinType::UFract: + Out << "DRj"; + break; case BuiltinType::ULongFract: + Out << "DRm"; + break; case BuiltinType::SatShortAccum: + Out << "DSDAs"; + break; case BuiltinType::SatAccum: + Out << "DSDAi"; + break; case BuiltinType::SatLongAccum: + Out << "DSDAl"; + break; case BuiltinType::SatUShortAccum: + Out << "DSDAt"; + break; case BuiltinType::SatUAccum: + Out << "DSDAj"; + break; case BuiltinType::SatULongAccum: + Out << "DSDAm"; + break; case BuiltinType::SatShortFract: + Out << "DSDRs"; + break; case BuiltinType::SatFract: + Out << "DSDRi"; + break; case BuiltinType::SatLongFract: + Out << "DSDRl"; + break; case BuiltinType::SatUShortFract: + Out << "DSDRt"; + break; case BuiltinType::SatUFract: + Out << "DSDRj"; + break; case BuiltinType::SatULongFract: - llvm_unreachable("Fixed point types are disabled for c++"); + Out << "DSDRm"; + break; case BuiltinType::Half: Out << "Dh"; break; @@ -2968,26 +3293,38 @@ void CXXNameMangler::mangleType(const BuiltinType *T) { Out << 'd'; break; case BuiltinType::LongDouble: { - const TargetInfo *TI = getASTContext().getLangOpts().OpenMP && - getASTContext().getLangOpts().OpenMPIsDevice - ? 
getASTContext().getAuxTargetInfo() - : &getASTContext().getTargetInfo(); + const TargetInfo *TI = + getASTContext().getLangOpts().OpenMP && + getASTContext().getLangOpts().OpenMPIsTargetDevice + ? getASTContext().getAuxTargetInfo() + : &getASTContext().getTargetInfo(); Out << TI->getLongDoubleMangling(); break; } case BuiltinType::Float128: { - const TargetInfo *TI = getASTContext().getLangOpts().OpenMP && - getASTContext().getLangOpts().OpenMPIsDevice - ? getASTContext().getAuxTargetInfo() - : &getASTContext().getTargetInfo(); + const TargetInfo *TI = + getASTContext().getLangOpts().OpenMP && + getASTContext().getLangOpts().OpenMPIsTargetDevice + ? getASTContext().getAuxTargetInfo() + : &getASTContext().getTargetInfo(); Out << TI->getFloat128Mangling(); break; } case BuiltinType::BFloat16: { - const TargetInfo *TI = &getASTContext().getTargetInfo(); + const TargetInfo *TI = + ((getASTContext().getLangOpts().OpenMP && + getASTContext().getLangOpts().OpenMPIsTargetDevice) || + getASTContext().getLangOpts().SYCLIsDevice) + ? getASTContext().getAuxTargetInfo() + : &getASTContext().getTargetInfo(); Out << TI->getBFloat16Mangling(); break; } + case BuiltinType::Ibm128: { + const TargetInfo *TI = &getASTContext().getTargetInfo(); + Out << TI->getIbm128Mangling(); + break; + } case BuiltinType::NullPtr: Out << "Dn"; break; @@ -3042,11 +3379,24 @@ void CXXNameMangler::mangleType(const BuiltinType *T) { #define SVE_VECTOR_TYPE(InternalName, MangledName, Id, SingletonId, NumEls, \ ElBits, IsSigned, IsFP, IsBF) \ case BuiltinType::Id: \ + if (T->getKind() == BuiltinType::SveBFloat16 && \ + isCompatibleWith(LangOptions::ClangABI::Ver17)) { \ + /* Prior to Clang 18.0 we used this incorrect mangled name */ \ + type_name = "__SVBFloat16_t"; \ + Out << "u" << type_name.size() << type_name; \ + } else { \ + type_name = MangledName; \ + Out << (type_name == InternalName ? "u" : "") << type_name.size() \ + << type_name; \ + } \ + break; +#define SVE_PREDICATE_TYPE(InternalName, MangledName, Id, SingletonId, NumEls) \ + case BuiltinType::Id: \ type_name = MangledName; \ Out << (type_name == InternalName ? "u" : "") << type_name.size() \ << type_name; \ break; -#define SVE_PREDICATE_TYPE(InternalName, MangledName, Id, SingletonId, NumEls) \ +#define SVE_OPAQUE_TYPE(InternalName, MangledName, Id, SingletonId) \ case BuiltinType::Id: \ type_name = MangledName; \ Out << (type_name == InternalName ? "u" : "") << type_name.size() \ @@ -3066,6 +3416,18 @@ void CXXNameMangler::mangleType(const BuiltinType *T) { Out << 'u' << type_name.size() << type_name; \ break; #include "clang/Basic/RISCVVTypes.def" +#define WASM_REF_TYPE(InternalName, MangledName, Id, SingletonId, AS) \ + case BuiltinType::Id: \ + type_name = MangledName; \ + Out << 'u' << type_name.size() << type_name; \ + break; +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) \ + case BuiltinType::Id: \ + type_name = Name; \ + Out << 'u' << type_name.size() << type_name; \ + break; +#include "clang/Basic/AMDGPUTypes.def" } } @@ -3080,11 +3442,16 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) { case CC_AAPCS: case CC_AAPCS_VFP: case CC_AArch64VectorCall: + case CC_AArch64SVEPCS: + case CC_AMDGPUKernelCall: case CC_IntelOclBicc: case CC_SpirFunction: case CC_OpenCLKernel: case CC_PreserveMost: case CC_PreserveAll: + case CC_M68kRTD: + case CC_PreserveNone: + case CC_RISCVVectorCall: // FIXME: we should be mangling all of the above. 
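The .def-driven cases above (SVE, RISC-V vector, WebAssembly reference and AMDGPU types) all land in the vendor-extended-type production u <source-name>. A quick sketch; __foo_t is a made-up name used only to show the length prefix, while the SVE line uses the real __SVInt8_t name that also appears in the fixed-length SVE code further down, and it assumes an AArch64 target with SVE available.

    // u <length> <name>:
    //   a hypothetical builtin named __foo_t  ->  u7__foo_t
    #include <arm_sve.h>          // AArch64 with SVE only
    void f(svint8_t);             // mangles as _Z1fu10__SVInt8_t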
return ""; @@ -3242,39 +3609,42 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionProtoType *Proto, if (Proto->getNumParams() == 0 && !Proto->isVariadic()) { // <builtin-type> ::= v # void Out << 'v'; + } else { + assert(!FD || FD->getNumParams() == Proto->getNumParams()); + for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) { + // Mangle extended parameter info as order-sensitive qualifiers here. + if (Proto->hasExtParameterInfos() && FD == nullptr) { + mangleExtParameterInfo(Proto->getExtParameterInfo(I)); + } - FunctionTypeDepth.pop(saved); - return; - } - - assert(!FD || FD->getNumParams() == Proto->getNumParams()); - for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) { - // Mangle extended parameter info as order-sensitive qualifiers here. - if (Proto->hasExtParameterInfos() && FD == nullptr) { - mangleExtParameterInfo(Proto->getExtParameterInfo(I)); + // Mangle the type. + QualType ParamTy = Proto->getParamType(I); + mangleType(Context.getASTContext().getSignatureParameterType(ParamTy)); + + if (FD) { + if (auto *Attr = FD->getParamDecl(I)->getAttr<PassObjectSizeAttr>()) { + // Attr can only take 1 character, so we can hardcode the length + // below. + assert(Attr->getType() <= 9 && Attr->getType() >= 0); + if (Attr->isDynamic()) + Out << "U25pass_dynamic_object_size" << Attr->getType(); + else + Out << "U17pass_object_size" << Attr->getType(); + } + } } - // Mangle the type. - QualType ParamTy = Proto->getParamType(I); - mangleType(Context.getASTContext().getSignatureParameterType(ParamTy)); + // <builtin-type> ::= z # ellipsis + if (Proto->isVariadic()) + Out << 'z'; + } - if (FD) { - if (auto *Attr = FD->getParamDecl(I)->getAttr<PassObjectSizeAttr>()) { - // Attr can only take 1 character, so we can hardcode the length below. - assert(Attr->getType() <= 9 && Attr->getType() >= 0); - if (Attr->isDynamic()) - Out << "U25pass_dynamic_object_size" << Attr->getType(); - else - Out << "U17pass_object_size" << Attr->getType(); - } - } + if (FD) { + FunctionTypeDepth.enterResultType(); + mangleRequiresClause(FD->getTrailingRequiresClause()); } FunctionTypeDepth.pop(saved); - - // <builtin-type> ::= z # ellipsis - if (Proto->isVariadic()) - Out << 'z'; } // <type> ::= <class-enum-type> @@ -3404,7 +3774,7 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) { QualType EltType = T->getElementType(); assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType"); const char *EltName = nullptr; - if (T->getVectorKind() == VectorType::NeonPolyVector) { + if (T->getVectorKind() == VectorKind::NeonPoly) { switch (cast<BuiltinType>(EltType)->getKind()) { case BuiltinType::SChar: case BuiltinType::UChar: @@ -3506,7 +3876,7 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) { "Neon vector type not 64 or 128 bits"); StringRef EltName; - if (T->getVectorKind() == VectorType::NeonPolyVector) { + if (T->getVectorKind() == VectorKind::NeonPoly) { switch (cast<BuiltinType>(EltType)->getKind()) { case BuiltinType::UChar: EltName = "Poly8"; @@ -3559,10 +3929,10 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const DependentVectorType *T) { // mangling scheme, it will be specified in the next revision. 
The mangling // scheme is otherwise defined in the appendices to the Procedure Call Standard // for the Arm Architecture, see -// https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst#appendix-c-mangling +// https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#appendix-c-mangling void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) { - assert((T->getVectorKind() == VectorType::SveFixedLengthDataVector || - T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) && + assert((T->getVectorKind() == VectorKind::SveFixedLengthData || + T->getVectorKind() == VectorKind::SveFixedLengthPredicate) && "expected fixed-length SVE vector!"); QualType EltType = T->getElementType(); @@ -3575,7 +3945,7 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) { TypeName = "__SVInt8_t"; break; case BuiltinType::UChar: { - if (T->getVectorKind() == VectorType::SveFixedLengthDataVector) + if (T->getVectorKind() == VectorKind::SveFixedLengthData) TypeName = "__SVUint8_t"; else TypeName = "__SVBool_t"; @@ -3617,7 +3987,7 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) { unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width; - if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) + if (T->getVectorKind() == VectorKind::SveFixedLengthPredicate) VecSizeInBits *= 8; Out << "9__SVE_VLSI" << 'u' << TypeName.size() << TypeName << "Lj" @@ -3633,6 +4003,90 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType( Diags.Report(T->getAttributeLoc(), DiagID); } +void CXXNameMangler::mangleRISCVFixedRVVVectorType(const VectorType *T) { + assert((T->getVectorKind() == VectorKind::RVVFixedLengthData || + T->getVectorKind() == VectorKind::RVVFixedLengthMask) && + "expected fixed-length RVV vector!"); + + QualType EltType = T->getElementType(); + assert(EltType->isBuiltinType() && + "expected builtin type for fixed-length RVV vector!"); + + SmallString<20> TypeNameStr; + llvm::raw_svector_ostream TypeNameOS(TypeNameStr); + TypeNameOS << "__rvv_"; + switch (cast<BuiltinType>(EltType)->getKind()) { + case BuiltinType::SChar: + TypeNameOS << "int8"; + break; + case BuiltinType::UChar: + if (T->getVectorKind() == VectorKind::RVVFixedLengthData) + TypeNameOS << "uint8"; + else + TypeNameOS << "bool"; + break; + case BuiltinType::Short: + TypeNameOS << "int16"; + break; + case BuiltinType::UShort: + TypeNameOS << "uint16"; + break; + case BuiltinType::Int: + TypeNameOS << "int32"; + break; + case BuiltinType::UInt: + TypeNameOS << "uint32"; + break; + case BuiltinType::Long: + TypeNameOS << "int64"; + break; + case BuiltinType::ULong: + TypeNameOS << "uint64"; + break; + case BuiltinType::Float16: + TypeNameOS << "float16"; + break; + case BuiltinType::Float: + TypeNameOS << "float32"; + break; + case BuiltinType::Double: + TypeNameOS << "float64"; + break; + default: + llvm_unreachable("unexpected element type for fixed-length RVV vector!"); + } + + unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width; + + // Apend the LMUL suffix. 
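A worked example of the __RVV_VLSI wrapper and the LMUL suffix computed just below, assuming the target reports a minimum vscale of 2 (so the code treats VLEN as 2 * 64 = 128 bits). The attribute spelling in the typedef is an assumption about the fixed-length RVV extension and is included only for context.

    // 256-bit fixed-length vector of 'int' (32-bit elements):
    //   element name    -> "__rvv_int32"
    //   LMUL suffix     -> 'm' followed by 256 / 128, i.e. "m2"
    //   full type name  -> "__rvv_int32m2_t" (15 characters)
    //   final mangling  -> 9__RVV_VLSIu15__rvv_int32m2_tLj256EE
    #include <riscv_vector.h>
    typedef vint32m2_t fixed_i32m2_t
        __attribute__((riscv_rvv_vector_bits(256)));   // assumed spelling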
+ auto VScale = getASTContext().getTargetInfo().getVScaleRange( + getASTContext().getLangOpts()); + unsigned VLen = VScale->first * llvm::RISCV::RVVBitsPerBlock; + + if (T->getVectorKind() == VectorKind::RVVFixedLengthData) { + TypeNameOS << 'm'; + if (VecSizeInBits >= VLen) + TypeNameOS << (VecSizeInBits / VLen); + else + TypeNameOS << 'f' << (VLen / VecSizeInBits); + } else { + TypeNameOS << (VLen / VecSizeInBits); + } + TypeNameOS << "_t"; + + Out << "9__RVV_VLSI" << 'u' << TypeNameStr.size() << TypeNameStr << "Lj" + << VecSizeInBits << "EE"; +} + +void CXXNameMangler::mangleRISCVFixedRVVVectorType( + const DependentVectorType *T) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, + "cannot mangle this dependent fixed-length RVV vector type yet"); + Diags.Report(T->getAttributeLoc(), DiagID); +} + // GNU extension: vector types // <type> ::= <vector-type> // <vector-type> ::= Dv <positive dimension number> _ @@ -3642,8 +4096,8 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType( // ::= p # AltiVec vector pixel // ::= b # Altivec vector bool void CXXNameMangler::mangleType(const VectorType *T) { - if ((T->getVectorKind() == VectorType::NeonVector || - T->getVectorKind() == VectorType::NeonPolyVector)) { + if ((T->getVectorKind() == VectorKind::Neon || + T->getVectorKind() == VectorKind::NeonPoly)) { llvm::Triple Target = getASTContext().getTargetInfo().getTriple(); llvm::Triple::ArchType Arch = getASTContext().getTargetInfo().getTriple().getArch(); @@ -3653,23 +4107,27 @@ void CXXNameMangler::mangleType(const VectorType *T) { else mangleNeonVectorType(T); return; - } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector || - T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) { + } else if (T->getVectorKind() == VectorKind::SveFixedLengthData || + T->getVectorKind() == VectorKind::SveFixedLengthPredicate) { mangleAArch64FixedSveVectorType(T); return; + } else if (T->getVectorKind() == VectorKind::RVVFixedLengthData || + T->getVectorKind() == VectorKind::RVVFixedLengthMask) { + mangleRISCVFixedRVVVectorType(T); + return; } Out << "Dv" << T->getNumElements() << '_'; - if (T->getVectorKind() == VectorType::AltiVecPixel) + if (T->getVectorKind() == VectorKind::AltiVecPixel) Out << 'p'; - else if (T->getVectorKind() == VectorType::AltiVecBool) + else if (T->getVectorKind() == VectorKind::AltiVecBool) Out << 'b'; else mangleType(T->getElementType()); } void CXXNameMangler::mangleType(const DependentVectorType *T) { - if ((T->getVectorKind() == VectorType::NeonVector || - T->getVectorKind() == VectorType::NeonPolyVector)) { + if ((T->getVectorKind() == VectorKind::Neon || + T->getVectorKind() == VectorKind::NeonPoly)) { llvm::Triple Target = getASTContext().getTargetInfo().getTriple(); llvm::Triple::ArchType Arch = getASTContext().getTargetInfo().getTriple().getArch(); @@ -3679,18 +4137,21 @@ void CXXNameMangler::mangleType(const DependentVectorType *T) { else mangleNeonVectorType(T); return; - } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector || - T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) { + } else if (T->getVectorKind() == VectorKind::SveFixedLengthData || + T->getVectorKind() == VectorKind::SveFixedLengthPredicate) { mangleAArch64FixedSveVectorType(T); return; + } else if (T->getVectorKind() == VectorKind::RVVFixedLengthData) { + mangleRISCVFixedRVVVectorType(T); + return; } Out << "Dv"; mangleExpression(T->getSizeExpr()); Out << '_'; - if 
(T->getVectorKind() == VectorType::AltiVecPixel) + if (T->getVectorKind() == VectorKind::AltiVecPixel) Out << 'p'; - else if (T->getVectorKind() == VectorType::AltiVecBool) + else if (T->getVectorKind() == VectorKind::AltiVecBool) Out << 'b'; else mangleType(T->getElementType()); @@ -3751,6 +4212,13 @@ void CXXNameMangler::mangleType(const PackExpansionType *T) { mangleType(T->getPattern()); } +void CXXNameMangler::mangleType(const PackIndexingType *T) { + if (!T->hasSelectedType()) + mangleType(T->getPattern()); + else + mangleType(T->getSelectedType()); +} + void CXXNameMangler::mangleType(const ObjCInterfaceType *T) { mangleSourceName(T->getDecl()->getIdentifier()); } @@ -3797,7 +4265,7 @@ void CXXNameMangler::mangleType(const InjectedClassNameType *T) { void CXXNameMangler::mangleType(const TemplateSpecializationType *T) { if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) { - mangleTemplateName(TD, T->getArgs(), T->getNumArgs()); + mangleTemplateName(TD, T->template_arguments()); } else { if (mangleSubstitution(QualType(T, 0))) return; @@ -3807,7 +4275,7 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) { // FIXME: GCC does not appear to mangle the template arguments when // the template in question is a dependent template name. Should we // emulate that badness? - mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs()); + mangleTemplateArgs(T->getTemplateName(), T->template_arguments()); addSubstitution(QualType(T, 0)); } } @@ -3824,20 +4292,20 @@ void CXXNameMangler::mangleType(const DependentNameType *T) { // ::= Te <name> # dependent elaborated type specifier using // # 'enum' switch (T->getKeyword()) { - case ETK_None: - case ETK_Typename: - break; - case ETK_Struct: - case ETK_Class: - case ETK_Interface: - Out << "Ts"; - break; - case ETK_Union: - Out << "Tu"; - break; - case ETK_Enum: - Out << "Te"; - break; + case ElaboratedTypeKeyword::None: + case ElaboratedTypeKeyword::Typename: + break; + case ElaboratedTypeKeyword::Struct: + case ElaboratedTypeKeyword::Class: + case ElaboratedTypeKeyword::Interface: + Out << "Ts"; + break; + case ElaboratedTypeKeyword::Union: + Out << "Tu"; + break; + case ElaboratedTypeKeyword::Enum: + Out << "Te"; + break; } // Typename types are always nested Out << 'N'; @@ -3859,7 +4327,7 @@ void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) { // FIXME: GCC does not appear to mangle the template arguments when // the template in question is a dependent template name. Should we // emulate that badness? - mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs()); + mangleTemplateArgs(Prefix, T->template_arguments()); Out << 'E'; } @@ -3903,16 +4371,22 @@ void CXXNameMangler::mangleType(const UnaryTransformType *T) { // If this is dependent, we need to record that. If not, we simply // mangle it as the underlying type since they are equivalent. 
if (T->isDependentType()) { - Out << 'U'; + Out << "u"; + StringRef BuiltinName; switch (T->getUTTKind()) { - case UnaryTransformType::EnumUnderlyingType: - Out << "3eut"; - break; +#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \ + case UnaryTransformType::Enum: \ + BuiltinName = "__" #Trait; \ + break; +#include "clang/Basic/TransformTypeTraits.def" } + Out << BuiltinName.size() << BuiltinName; } + Out << "I"; mangleType(T->getBaseType()); + Out << "E"; } void CXXNameMangler::mangleType(const AutoType *T) { @@ -3922,7 +4396,15 @@ void CXXNameMangler::mangleType(const AutoType *T) { "shouldn't need to mangle __auto_type!"); // <builtin-type> ::= Da # auto // ::= Dc # decltype(auto) - Out << (T->isDecltypeAuto() ? "Dc" : "Da"); + // ::= Dk # constrained auto + // ::= DK # constrained decltype(auto) + if (T->isConstrained() && !isCompatibleWith(LangOptions::ClangABI::Ver17)) { + Out << (T->isDecltypeAuto() ? "DK" : "Dk"); + mangleTypeConstraint(T->getTypeConstraintConcept(), + T->getTypeConstraintArguments()); + } else { + Out << (T->isDecltypeAuto() ? "Dc" : "Da"); + } } void CXXNameMangler::mangleType(const DeducedTemplateSpecializationType *T) { @@ -3954,26 +4436,24 @@ void CXXNameMangler::mangleType(const PipeType *T) { Out << "8ocl_pipe"; } -void CXXNameMangler::mangleType(const ExtIntType *T) { - Out << "U7_ExtInt"; - llvm::APSInt BW(32, true); - BW = T->getNumBits(); - TemplateArgument TA(Context.getASTContext(), BW, getASTContext().IntTy); - mangleTemplateArgs(TemplateName(), &TA, 1); - if (T->isUnsigned()) - Out << "j"; - else - Out << "i"; +void CXXNameMangler::mangleType(const BitIntType *T) { + // 5.1.5.2 Builtin types + // <type> ::= DB <number | instantiation-dependent expression> _ + // ::= DU <number | instantiation-dependent expression> _ + Out << "D" << (T->isUnsigned() ? "U" : "B") << T->getNumBits() << "_"; } -void CXXNameMangler::mangleType(const DependentExtIntType *T) { - Out << "U7_ExtInt"; - TemplateArgument TA(T->getNumBitsExpr()); - mangleTemplateArgs(TemplateName(), &TA, 1); - if (T->isUnsigned()) - Out << "j"; - else - Out << "i"; +void CXXNameMangler::mangleType(const DependentBitIntType *T) { + // 5.1.5.2 Builtin types + // <type> ::= DB <number | instantiation-dependent expression> _ + // ::= DU <number | instantiation-dependent expression> _ + Out << "D" << (T->isUnsigned() ? "U" : "B"); + mangleExpression(T->getNumBitsExpr()); + Out << "_"; +} + +void CXXNameMangler::mangleType(const ArrayParameterType *T) { + mangleType(cast<ConstantArrayType>(T)); } void CXXNameMangler::mangleIntegerLiteral(QualType T, @@ -4076,6 +4556,74 @@ void CXXNameMangler::mangleInitListElements(const InitListExpr *InitList) { mangleExpression(InitList->getInit(i)); } +void CXXNameMangler::mangleRequirement(SourceLocation RequiresExprLoc, + const concepts::Requirement *Req) { + using concepts::Requirement; + + // TODO: We can't mangle the result of a failed substitution. It's not clear + // whether we should be mangling the original form prior to any substitution + // instead. 
See https://lists.isocpp.org/core/2023/04/14118.php + auto HandleSubstitutionFailure = + [&](SourceLocation Loc) { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, "cannot mangle this requires-expression " + "containing a substitution failure"); + Diags.Report(Loc, DiagID); + Out << 'F'; + }; + + switch (Req->getKind()) { + case Requirement::RK_Type: { + const auto *TR = cast<concepts::TypeRequirement>(Req); + if (TR->isSubstitutionFailure()) + return HandleSubstitutionFailure( + TR->getSubstitutionDiagnostic()->DiagLoc); + + Out << 'T'; + mangleType(TR->getType()->getType()); + break; + } + + case Requirement::RK_Simple: + case Requirement::RK_Compound: { + const auto *ER = cast<concepts::ExprRequirement>(Req); + if (ER->isExprSubstitutionFailure()) + return HandleSubstitutionFailure( + ER->getExprSubstitutionDiagnostic()->DiagLoc); + + Out << 'X'; + mangleExpression(ER->getExpr()); + + if (ER->hasNoexceptRequirement()) + Out << 'N'; + + if (!ER->getReturnTypeRequirement().isEmpty()) { + if (ER->getReturnTypeRequirement().isSubstitutionFailure()) + return HandleSubstitutionFailure(ER->getReturnTypeRequirement() + .getSubstitutionDiagnostic() + ->DiagLoc); + + Out << 'R'; + mangleTypeConstraint(ER->getReturnTypeRequirement().getTypeConstraint()); + } + break; + } + + case Requirement::RK_Nested: + const auto *NR = cast<concepts::NestedRequirement>(Req); + if (NR->hasInvalidConstraint()) { + // FIXME: NestedRequirement should track the location of its requires + // keyword. + return HandleSubstitutionFailure(RequiresExprLoc); + } + + Out << 'Q'; + mangleExpression(NR->getConstraintExpr()); + break; + } +} + void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity, bool AsTemplateArg) { // <expression> ::= <unary operator-name> <expression> @@ -4168,15 +4716,16 @@ recurse: case Expr::ArrayInitIndexExprClass: case Expr::NoInitExprClass: case Expr::ParenListExprClass: - case Expr::LambdaExprClass: case Expr::MSPropertyRefExprClass: case Expr::MSPropertySubscriptExprClass: case Expr::TypoExprClass: // This should no longer exist in the AST by now. case Expr::RecoveryExprClass: - case Expr::OMPArraySectionExprClass: + case Expr::ArraySectionExprClass: case Expr::OMPArrayShapingExprClass: case Expr::OMPIteratorExprClass: case Expr::CXXInheritedCtorInitExprClass: + case Expr::CXXParenListInitExprClass: + case Expr::PackIndexingExprClass: llvm_unreachable("unexpected statement kind"); case Expr::ConstantExprClass: @@ -4208,8 +4757,6 @@ recurse: case Expr::ShuffleVectorExprClass: case Expr::ConvertVectorExprClass: case Expr::StmtExprClass: - case Expr::TypeTraitExprClass: - case Expr::RequiresExprClass: case Expr::ArrayTypeTraitExprClass: case Expr::ExpressionTraitExprClass: case Expr::VAArgExprClass: @@ -4218,6 +4765,7 @@ recurse: case Expr::PseudoObjectExprClass: case Expr::AtomicExprClass: case Expr::SourceLocExprClass: + case Expr::EmbedExprClass: case Expr::BuiltinBitCastExprClass: { NotPrimaryExpr(); @@ -4238,8 +4786,7 @@ recurse: const CXXUuidofExpr *UE = cast<CXXUuidofExpr>(E); // As of clang 12, uuidof uses the vendor extended expression // mangling. Previously, it used a special-cased nonstandard extension. 
- if (Context.getASTContext().getLangOpts().getClangABICompat() > - LangOptions::ClangABI::Ver11) { + if (!isCompatibleWith(LangOptions::ClangABI::Ver11)) { Out << "u8__uuidof"; if (UE->isTypeOperand()) mangleType(UE->getTypeOperand(Context.getASTContext())); @@ -4318,9 +4865,23 @@ recurse: E = cast<CXXStdInitializerListExpr>(E)->getSubExpr(); goto recurse; - case Expr::SubstNonTypeTemplateParmExprClass: + case Expr::SubstNonTypeTemplateParmExprClass: { + // Mangle a substituted parameter the same way we mangle the template + // argument. + auto *SNTTPE = cast<SubstNonTypeTemplateParmExpr>(E); + if (auto *CE = dyn_cast<ConstantExpr>(SNTTPE->getReplacement())) { + // Pull out the constant value and mangle it as a template argument. + QualType ParamType = SNTTPE->getParameterType(Context.getASTContext()); + assert(CE->hasAPValueResult() && "expected the NTTP to have an APValue"); + mangleValueInTemplateArg(ParamType, CE->getAPValueResult(), false, + /*NeedExactType=*/true); + break; + } + // The remaining cases all happen to be substituted with expressions that + // mangle the same as a corresponding template argument anyway. E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(); goto recurse; + } case Expr::UserDefinedLiteralClass: // We follow g++'s approach of mangling a UDL as a call to the literal @@ -4368,7 +4929,7 @@ recurse: Out << '_'; mangleType(New->getAllocatedType()); if (New->hasInitializer()) { - if (New->getInitializationStyle() == CXXNewExpr::ListInit) + if (New->getInitializationStyle() == CXXNewInitializationStyle::Braces) Out << "il"; else Out << "pi"; @@ -4382,7 +4943,8 @@ recurse: } else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) { for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i) mangleExpression(PLE->getExpr(i)); - } else if (New->getInitializationStyle() == CXXNewExpr::ListInit && + } else if (New->getInitializationStyle() == + CXXNewInitializationStyle::Braces && isa<InitListExpr>(Init)) { // Only take InitListExprs apart for list-initialization. mangleInitListElements(cast<InitListExpr>(Init)); @@ -4562,6 +5124,10 @@ recurse: // If the result of the operator is implicitly converted to a known // integer type, that type is used for the literal; otherwise, the type // of std::size_t or std::ptrdiff_t is used. + // + // FIXME: We still include the operand in the profile in this case. This + // can lead to mangling collisions between function templates that we + // consider to be different. QualType T = (ImplicitlyConvertedToType.isNull() || !ImplicitlyConvertedToType->isIntegerType())? SAE->getType() : ImplicitlyConvertedToType; @@ -4591,8 +5157,7 @@ recurse: // As of clang 12, we mangle __alignof__ differently than alignof. (They // have acted differently since Clang 8, but were previously mangled the // same.) 
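To make the alignof distinction above concrete: in a dependent signature, the standard operator keeps its compact Itanium form, while Clang's GNU-style spelling uses the vendor-extension form emitted just below. A sketch; the full symbols depend on the rest of each signature.

    template <class T> auto f(T) -> char (*)[alignof(T)];
    // alignof(T) encodes as  at T_  (i.e. "atT_")
    template <class T> auto g(T) -> char (*)[__alignof__(T)];
    // __alignof__(T) encodes as  u11__alignof__ T_ E  (Clang 12 and later)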
- if (Context.getASTContext().getLangOpts().getClangABICompat() > - LangOptions::ClangABI::Ver11) { + if (!isCompatibleWith(LangOptions::ClangABI::Ver11)) { Out << "u11__alignof__"; if (SAE->isArgumentType()) mangleType(SAE->getArgumentType()); @@ -4601,11 +5166,27 @@ recurse: Out << 'E'; break; } - LLVM_FALLTHROUGH; + [[fallthrough]]; case UETT_AlignOf: Out << 'a'; MangleAlignofSizeofArg(); break; + case UETT_DataSizeOf: { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = + Diags.getCustomDiagID(DiagnosticsEngine::Error, + "cannot yet mangle __datasizeof expression"); + Diags.Report(DiagID); + return; + } + case UETT_PtrAuthTypeDiscriminator: { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, + "cannot yet mangle __builtin_ptrauth_type_discriminator expression"); + Diags.Report(E->getExprLoc(), DiagID); + return; + } case UETT_VecStep: { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -4621,10 +5202,32 @@ recurse: Diags.Report(DiagID); return; } + case UETT_VectorElements: { + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, + "cannot yet mangle __builtin_vectorelements expression"); + Diags.Report(DiagID); + return; + } } break; } + case Expr::TypeTraitExprClass: { + // <expression> ::= u <source-name> <template-arg>* E # vendor extension + const TypeTraitExpr *TTE = cast<TypeTraitExpr>(E); + NotPrimaryExpr(); + Out << 'u'; + llvm::StringRef Spelling = getTraitSpelling(TTE->getTrait()); + Out << Spelling.size() << Spelling; + for (TypeSourceInfo *TSI : TTE->getArgs()) { + mangleType(TSI->getType()); + } + Out << 'E'; + break; + } + case Expr::CXXThrowExprClass: { NotPrimaryExpr(); const CXXThrowExpr *TE = cast<CXXThrowExpr>(E); @@ -4813,13 +5416,57 @@ recurse: goto recurse; case Expr::ConceptSpecializationExprClass: { - // <expr-primary> ::= L <mangled-name> E # external name - Out << "L_Z"; auto *CSE = cast<ConceptSpecializationExpr>(E); - mangleTemplateName(CSE->getNamedConcept(), - CSE->getTemplateArguments().data(), - CSE->getTemplateArguments().size()); - Out << 'E'; + if (isCompatibleWith(LangOptions::ClangABI::Ver17)) { + // Clang 17 and before mangled concept-ids as if they resolved to an + // entity, meaning that references to enclosing template arguments don't + // work. + Out << "L_Z"; + mangleTemplateName(CSE->getNamedConcept(), CSE->getTemplateArguments()); + Out << 'E'; + break; + } + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24. + NotPrimaryExpr(); + mangleUnresolvedName( + CSE->getNestedNameSpecifierLoc().getNestedNameSpecifier(), + CSE->getConceptNameInfo().getName(), + CSE->getTemplateArgsAsWritten()->getTemplateArgs(), + CSE->getTemplateArgsAsWritten()->getNumTemplateArgs()); + break; + } + + case Expr::RequiresExprClass: { + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24. + auto *RE = cast<RequiresExpr>(E); + // This is a primary-expression in the C++ grammar, but does not have an + // <expr-primary> mangling (starting with 'L'). 
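Type-trait expressions reaching the mangler use the same vendor-extension production. A sketch; the requires-clause here merely provides a dependent context in which the trait survives into the mangled signature.

    template <class T> void f(T) requires (__is_same(T, int));
    // The trait encodes as  u9__is_same T_ i E  (trait spelling, then each
    // argument type, then E), roughly "u9__is_sameT_iE" within the
    // requires-clause mangling.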
+ NotPrimaryExpr(); + if (RE->getLParenLoc().isValid()) { + Out << "rQ"; + FunctionTypeDepthState saved = FunctionTypeDepth.push(); + if (RE->getLocalParameters().empty()) { + Out << 'v'; + } else { + for (ParmVarDecl *Param : RE->getLocalParameters()) { + mangleType(Context.getASTContext().getSignatureParameterType( + Param->getType())); + } + } + Out << '_'; + + // The rest of the mangling is in the immediate scope of the parameters. + FunctionTypeDepth.enterResultType(); + for (const concepts::Requirement *Req : RE->getRequirements()) + mangleRequirement(RE->getExprLoc(), Req); + FunctionTypeDepth.pop(saved); + Out << 'E'; + } else { + Out << "rq"; + for (const concepts::Requirement *Req : RE->getRequirements()) + mangleRequirement(RE->getExprLoc(), Req); + Out << 'E'; + } break; } @@ -4953,6 +5600,16 @@ recurse: break; } + case Expr::LambdaExprClass: { + // A lambda-expression can't appear in the signature of an + // externally-visible declaration, so there's no standard mangling for + // this, but mangling as a literal of the closure type seems reasonable. + Out << "L"; + mangleType(Context.getASTContext().getRecordType(cast<LambdaExpr>(E)->getLambdaClass())); + Out << "E"; + break; + } + case Expr::PackExpansionExprClass: NotPrimaryExpr(); Out << "sp"; @@ -5170,28 +5827,116 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) { } } -namespace { // Helper to provide ancillary information on a template used to mangle its // arguments. -struct TemplateArgManglingInfo { +struct CXXNameMangler::TemplateArgManglingInfo { + const CXXNameMangler &Mangler; TemplateDecl *ResolvedTemplate = nullptr; bool SeenPackExpansionIntoNonPack = false; const NamedDecl *UnresolvedExpandedPack = nullptr; - TemplateArgManglingInfo(TemplateName TN) { + TemplateArgManglingInfo(const CXXNameMangler &Mangler, TemplateName TN) + : Mangler(Mangler) { if (TemplateDecl *TD = TN.getAsTemplateDecl()) ResolvedTemplate = TD; } - /// Do we need to mangle template arguments with exactly correct types? - /// + /// Information about how to mangle a template argument. + struct Info { + /// Do we need to mangle the template argument with an exactly correct type? + bool NeedExactType; + /// If we need to prefix the mangling with a mangling of the template + /// parameter, the corresponding parameter. + const NamedDecl *TemplateParameterToMangle; + }; + + /// Determine whether the resolved template might be overloaded on its + /// template parameter list. If so, the mangling needs to include enough + /// information to reconstruct the template parameter list. + bool isOverloadable() { + // Function templates are generally overloadable. As a special case, a + // member function template of a generic lambda is not overloadable. + if (auto *FTD = dyn_cast_or_null<FunctionTemplateDecl>(ResolvedTemplate)) { + auto *RD = dyn_cast<CXXRecordDecl>(FTD->getDeclContext()); + if (!RD || !RD->isGenericLambda()) + return true; + } + + // All other templates are not overloadable. Partial specializations would + // be, but we never mangle them. + return false; + } + + /// Determine whether we need to prefix this <template-arg> mangling with a + /// <template-param-decl>. This happens if the natural template parameter for + /// the argument mangling is not the same as the actual template parameter. + bool needToMangleTemplateParam(const NamedDecl *Param, + const TemplateArgument &Arg) { + // For a template type parameter, the natural parameter is 'typename T'. + // The actual parameter might be constrained. 
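Putting the rq/rQ forms together with the requirement codes from mangleRequirement above, a sketch; the concept C is a placeholder defined only to make the snippet self-contained.

    template <class A, class B> concept C = true;    // placeholder
    template <class T>
    void f(T) requires requires (T x) {
      typename T::type;               // type requirement     -> T <type>
      { x + x } noexcept -> C<int>;   // compound requirement -> X <expr> N R <constraint>
      requires (sizeof(T) > 1);       // nested requirement   -> Q <expr>
    };
    // The requires-expression itself encodes as  rQ <param-type> _ ... E ;
    // a parameter-less  requires { ... }  uses  rq ... E  instead.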
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) + return TTP->hasTypeConstraint(); + + if (Arg.getKind() == TemplateArgument::Pack) { + // For an empty pack, the natural parameter is `typename...`. + if (Arg.pack_size() == 0) + return true; + + // For any other pack, we use the first argument to determine the natural + // template parameter. + return needToMangleTemplateParam(Param, *Arg.pack_begin()); + } + + // For a non-type template parameter, the natural parameter is `T V` (for a + // prvalue argument) or `T &V` (for a glvalue argument), where `T` is the + // type of the argument, which we require to exactly match. If the actual + // parameter has a deduced or instantiation-dependent type, it is not + // equivalent to the natural parameter. + if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) + return NTTP->getType()->isInstantiationDependentType() || + NTTP->getType()->getContainedDeducedType(); + + // For a template template parameter, the template-head might differ from + // that of the template. + auto *TTP = cast<TemplateTemplateParmDecl>(Param); + TemplateName ArgTemplateName = Arg.getAsTemplateOrTemplatePattern(); + const TemplateDecl *ArgTemplate = ArgTemplateName.getAsTemplateDecl(); + if (!ArgTemplate) + return true; + + // Mangle the template parameter list of the parameter and argument to see + // if they are the same. We can't use Profile for this, because it can't + // model the depth difference between parameter and argument and might not + // necessarily have the same definition of "identical" that we use here -- + // that is, same mangling. + auto MangleTemplateParamListToString = + [&](SmallVectorImpl<char> &Buffer, const TemplateParameterList *Params, + unsigned DepthOffset) { + llvm::raw_svector_ostream Stream(Buffer); + CXXNameMangler(Mangler.Context, Stream, + WithTemplateDepthOffset{DepthOffset}) + .mangleTemplateParameterList(Params); + }; + llvm::SmallString<128> ParamTemplateHead, ArgTemplateHead; + MangleTemplateParamListToString(ParamTemplateHead, + TTP->getTemplateParameters(), 0); + // Add the depth of the parameter's template parameter list to all + // parameters appearing in the argument to make the indexes line up + // properly. + MangleTemplateParamListToString(ArgTemplateHead, + ArgTemplate->getTemplateParameters(), + TTP->getTemplateParameters()->getDepth()); + return ParamTemplateHead != ArgTemplateHead; + } + + /// Determine information about how this template argument should be mangled. /// This should be called exactly once for each parameter / argument pair, in /// order. - bool needExactType(unsigned ParamIdx, const TemplateArgument &Arg) { + Info getArgInfo(unsigned ParamIdx, const TemplateArgument &Arg) { // We need correct types when the template-name is unresolved or when it // names a template that is able to be overloaded. if (!ResolvedTemplate || SeenPackExpansionIntoNonPack) - return true; + return {true, nullptr}; // Move to the next parameter. const NamedDecl *Param = UnresolvedExpandedPack; @@ -5200,13 +5945,14 @@ struct TemplateArgManglingInfo { "no parameter for argument"); Param = ResolvedTemplate->getTemplateParameters()->getParam(ParamIdx); - // If we reach an expanded parameter pack whose argument isn't in pack - // form, that means Sema couldn't figure out which arguments belonged to - // it, because it contains a pack expansion. Track the expanded pack for - // all further template arguments until we hit that pack expansion. 
+ // If we reach a parameter pack whose argument isn't in pack form, that + // means Sema couldn't or didn't figure out which arguments belonged to + // it, because it contains a pack expansion or because Sema bailed out of + // computing parameter / argument correspondence before this point. Track + // the pack as the corresponding parameter for all further template + // arguments until we hit a pack expansion, at which point we don't know + // the correspondence between parameters and arguments at all. if (Param->isParameterPack() && Arg.getKind() != TemplateArgument::Pack) { - assert(getExpandedPackSize(Param) && - "failed to form pack argument for parameter pack"); UnresolvedExpandedPack = Param; } } @@ -5217,17 +5963,13 @@ struct TemplateArgManglingInfo { if (Arg.isPackExpansion() && (!Param->isParameterPack() || UnresolvedExpandedPack)) { SeenPackExpansionIntoNonPack = true; - return true; + return {true, nullptr}; } - // We need exact types for function template arguments because they might be - // overloaded on template parameter type. As a special case, a member - // function template of a generic lambda is not overloadable. - if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ResolvedTemplate)) { - auto *RD = dyn_cast<CXXRecordDecl>(FTD->getDeclContext()); - if (!RD || !RD->isGenericLambda()) - return true; - } + // We need exact types for arguments of a template that might be overloaded + // on template parameter type. + if (isOverloadable()) + return {true, needToMangleTemplateParam(Param, Arg) ? Param : nullptr}; // Otherwise, we only need a correct type if the parameter has a deduced // type. @@ -5237,44 +5979,75 @@ struct TemplateArgManglingInfo { // but it doesn't matter because substitution and expansion don't affect // whether a deduced type appears in the type. auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param); - return NTTP && NTTP->getType()->getContainedDeducedType(); + bool NeedExactType = NTTP && NTTP->getType()->getContainedDeducedType(); + return {NeedExactType, nullptr}; + } + + /// Determine if we should mangle a requires-clause after the template + /// argument list. If so, returns the expression to mangle. 
+ const Expr *getTrailingRequiresClauseToMangle() { + if (!isOverloadable()) + return nullptr; + return ResolvedTemplate->getTemplateParameters()->getRequiresClause(); } }; -} void CXXNameMangler::mangleTemplateArgs(TemplateName TN, const TemplateArgumentLoc *TemplateArgs, unsigned NumTemplateArgs) { - // <template-args> ::= I <template-arg>+ E + // <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E Out << 'I'; - TemplateArgManglingInfo Info(TN); - for (unsigned i = 0; i != NumTemplateArgs; ++i) - mangleTemplateArg(TemplateArgs[i].getArgument(), - Info.needExactType(i, TemplateArgs[i].getArgument())); + TemplateArgManglingInfo Info(*this, TN); + for (unsigned i = 0; i != NumTemplateArgs; ++i) { + mangleTemplateArg(Info, i, TemplateArgs[i].getArgument()); + } + mangleRequiresClause(Info.getTrailingRequiresClauseToMangle()); Out << 'E'; } void CXXNameMangler::mangleTemplateArgs(TemplateName TN, const TemplateArgumentList &AL) { - // <template-args> ::= I <template-arg>+ E + // <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E Out << 'I'; - TemplateArgManglingInfo Info(TN); - for (unsigned i = 0, e = AL.size(); i != e; ++i) - mangleTemplateArg(AL[i], Info.needExactType(i, AL[i])); + TemplateArgManglingInfo Info(*this, TN); + for (unsigned i = 0, e = AL.size(); i != e; ++i) { + mangleTemplateArg(Info, i, AL[i]); + } + mangleRequiresClause(Info.getTrailingRequiresClauseToMangle()); Out << 'E'; } void CXXNameMangler::mangleTemplateArgs(TemplateName TN, - const TemplateArgument *TemplateArgs, - unsigned NumTemplateArgs) { - // <template-args> ::= I <template-arg>+ E + ArrayRef<TemplateArgument> Args) { + // <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E Out << 'I'; - TemplateArgManglingInfo Info(TN); - for (unsigned i = 0; i != NumTemplateArgs; ++i) - mangleTemplateArg(TemplateArgs[i], Info.needExactType(i, TemplateArgs[i])); + TemplateArgManglingInfo Info(*this, TN); + for (unsigned i = 0; i != Args.size(); ++i) { + mangleTemplateArg(Info, i, Args[i]); + } + mangleRequiresClause(Info.getTrailingRequiresClauseToMangle()); Out << 'E'; } +void CXXNameMangler::mangleTemplateArg(TemplateArgManglingInfo &Info, + unsigned Index, TemplateArgument A) { + TemplateArgManglingInfo::Info ArgInfo = Info.getArgInfo(Index, A); + + // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/47. + if (ArgInfo.TemplateParameterToMangle && + !isCompatibleWith(LangOptions::ClangABI::Ver17)) { + // The template parameter is mangled if the mangling would otherwise be + // ambiguous. + // + // <template-arg> ::= <template-param-decl> <template-arg> + // + // Clang 17 and before did not do this. + mangleTemplateParamDecl(ArgInfo.TemplateParameterToMangle); + } + + mangleTemplateArg(A, ArgInfo.NeedExactType); +} + void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) { // <template-arg> ::= <type> # type or template // ::= X <expression> E # expression @@ -5327,8 +6100,7 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) { else if (D->getType()->isArrayType() && Ctx.hasSimilarType(Ctx.getDecayedType(D->getType()), A.getParamTypeForDecl()) && - Ctx.getLangOpts().getClangABICompat() > - LangOptions::ClangABI::Ver11) + !isCompatibleWith(LangOptions::ClangABI::Ver11)) // Build a value corresponding to this implicit array-to-pointer decay. 
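As a baseline for the <template-args> productions above, a plain instantiation and the effect of constraints, sketched:

    template <class T> void f(T) {}
    template void f<int>(int);   // mangles as _Z1fIiEvT_  (template-args "IiE")
    // A requires-clause written on the template-head is appended inside the
    // argument list as  Q <expr>  before the closing E, and a constrained
    // parameter such as  template <C T>  can additionally cause the argument
    // to be prefixed with its <template-param-decl> (Clang 18 and later).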
Value = APValue(APValue::LValueBase(D), CharUnits::Zero(), {APValue::LValuePathEntry::ArrayIndex(0)}, @@ -5346,6 +6118,11 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) { mangleNullPointer(A.getNullPtrType()); break; } + case TemplateArgument::StructuralValue: + mangleValueInTemplateArg(A.getStructuralValueType(), + A.getAsStructuralValue(), + /*TopLevel=*/true, NeedExactType); + break; case TemplateArgument::Pack: { // <template-arg> ::= J <template-arg>* E Out << 'J'; @@ -5357,8 +6134,7 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) { } void CXXNameMangler::mangleTemplateArgExpr(const Expr *E) { - ASTContext &Ctx = Context.getASTContext(); - if (Ctx.getLangOpts().getClangABICompat() > LangOptions::ClangABI::Ver11) { + if (!isCompatibleWith(LangOptions::ClangABI::Ver11)) { mangleExpression(E, UnknownArity, /*AsTemplateArg=*/true); return; } @@ -5414,7 +6190,7 @@ static bool isZeroInitialized(QualType T, const APValue &V) { } I = 0; for (const FieldDecl *FD : RD->fields()) { - if (!FD->isUnnamedBitfield() && + if (!FD->isUnnamedBitField() && !isZeroInitialized(FD->getType(), V.getStructField(I))) return false; ++I; @@ -5427,7 +6203,7 @@ static bool isZeroInitialized(QualType T, const APValue &V) { assert(RD && "unexpected type for union value"); // Zero-initialization zeroes the first non-unnamed-bitfield field, if any. for (const FieldDecl *FD : RD->fields()) { - if (!FD->isUnnamedBitfield()) + if (!FD->isUnnamedBitField()) return V.getUnionField() && declaresSameEntity(FD, V.getUnionField()) && isZeroInitialized(FD->getType(), V.getUnionValue()); } @@ -5493,6 +6269,47 @@ static QualType getLValueType(ASTContext &Ctx, const APValue &LV) { return T; } +static IdentifierInfo *getUnionInitName(SourceLocation UnionLoc, + DiagnosticsEngine &Diags, + const FieldDecl *FD) { + // According to: + // http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling.anonymous + // For the purposes of mangling, the name of an anonymous union is considered + // to be the name of the first named data member found by a pre-order, + // depth-first, declaration-order walk of the data members of the anonymous + // union. + + if (FD->getIdentifier()) + return FD->getIdentifier(); + + // The only cases where the identifer of a FieldDecl would be blank is if the + // field represents an anonymous record type or if it is an unnamed bitfield. + // There is no type to descend into in the case of a bitfield, so we can just + // return nullptr in that case. + if (FD->isBitField()) + return nullptr; + const CXXRecordDecl *RD = FD->getType()->getAsCXXRecordDecl(); + + // Consider only the fields in declaration order, searched depth-first. We + // don't care about the active member of the union, as all we are doing is + // looking for a valid name. We also don't check bases, due to guidance from + // the Itanium ABI folks. + for (const FieldDecl *RDField : RD->fields()) { + if (IdentifierInfo *II = getUnionInitName(UnionLoc, Diags, RDField)) + return II; + } + + // According to the Itanium ABI: If there is no such data member (i.e., if all + // of the data members in the union are unnamed), then there is no way for a + // program to refer to the anonymous union, and there is therefore no need to + // mangle its name. However, we should diagnose this anyway. 
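A sketch of the walk described above; whether such a union is actually usable as a non-type template argument still depends on the usual C++20 structural-type rules.

    union U {
      union { int a; float b; };   // nested anonymous union
      long c;
    };
    // If the active member of a U template argument sits inside the nested
    // anonymous union, the designated-initializer mangling below uses the
    // name 'a': the first named data member found by a pre-order,
    // depth-first walk.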
+ unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, "cannot mangle this unnamed union NTTP yet"); + Diags.Report(UnionLoc, DiagID); + + return nullptr; +} + void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, bool TopLevel, bool NeedExactType) { @@ -5525,11 +6342,10 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, assert(RD && "unexpected type for record value"); // Drop trailing zero-initialized elements. - llvm::SmallVector<const FieldDecl *, 16> Fields(RD->field_begin(), - RD->field_end()); + llvm::SmallVector<const FieldDecl *, 16> Fields(RD->fields()); while ( !Fields.empty() && - (Fields.back()->isUnnamedBitfield() || + (Fields.back()->isUnnamedBitField() || isZeroInitialized(Fields.back()->getType(), V.getStructField(Fields.back()->getFieldIndex())))) { Fields.pop_back(); @@ -5549,7 +6365,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, for (unsigned I = 0, N = Bases.size(); I != N; ++I) mangleValueInTemplateArg(Bases[I].getType(), V.getStructBase(I), false); for (unsigned I = 0, N = Fields.size(); I != N; ++I) { - if (Fields[I]->isUnnamedBitfield()) + if (Fields[I]->isUnnamedBitField()) continue; mangleValueInTemplateArg(Fields[I]->getType(), V.getStructField(Fields[I]->getFieldIndex()), @@ -5576,7 +6392,10 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, mangleType(T); if (!isZeroInitialized(T, V)) { Out << "di"; - mangleSourceName(FD->getIdentifier()); + IdentifierInfo *II = (getUnionInitName( + T->getAsCXXRecordDecl()->getLocation(), Context.getDiags(), FD)); + if (II) + mangleSourceName(II); mangleValueInTemplateArg(FD->getType(), V.getUnionValue(), false); } Out << 'E'; @@ -5712,7 +6531,20 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, Out << "plcvPcad"; Kind = Offset; } else { - if (!V.getLValuePath().empty() || V.isLValueOnePastTheEnd()) { + // Clang 11 and before mangled an array subject to array-to-pointer decay + // as if it were the declaration itself. 
+ bool IsArrayToPointerDecayMangledAsDecl = false; + if (TopLevel && Ctx.getLangOpts().getClangABICompat() <= + LangOptions::ClangABI::Ver11) { + QualType BType = B.getType(); + IsArrayToPointerDecayMangledAsDecl = + BType->isArrayType() && V.getLValuePath().size() == 1 && + V.getLValuePath()[0].getAsArrayIndex() == 0 && + Ctx.hasSimilarType(T, Ctx.getDecayedType(BType)); + } + + if ((!V.getLValuePath().empty() || V.isLValueOnePastTheEnd()) && + !IsArrayToPointerDecayMangledAsDecl) { NotPrimaryExpr(); // A final conversion to the template parameter's type is usually // folded into the 'so' mangling, but we can't do that for 'void*' @@ -5731,8 +6563,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, } else { if (NeedExactType && !Ctx.hasSameType(T->getPointeeType(), getLValueType(Ctx, V)) && - Ctx.getLangOpts().getClangABICompat() > - LangOptions::ClangABI::Ver11) { + !isCompatibleWith(LangOptions::ClangABI::Ver11)) { NotPrimaryExpr(); Out << "cv"; mangleType(T); @@ -5830,8 +6661,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V, !Ctx.hasSameType( T->castAs<MemberPointerType>()->getPointeeType(), V.getMemberPointerDecl()->getType()) && - Ctx.getLangOpts().getClangABICompat() > - LangOptions::ClangABI::Ver11) { + !isCompatibleWith(LangOptions::ClangABI::Ver11)) { Out << "cv"; mangleType(T); } @@ -5862,6 +6692,7 @@ void CXXNameMangler::mangleTemplateParameter(unsigned Depth, unsigned Index) { // The latter two manglings are from a proposal here: // https://github.com/itanium-cxx-abi/cxx-abi/issues/31#issuecomment-528122117 Out << 'T'; + Depth += TemplateDepthOffset; if (Depth != 0) Out << 'L' << (Depth - 1) << '_'; if (Index != 0) @@ -5870,9 +6701,11 @@ void CXXNameMangler::mangleTemplateParameter(unsigned Depth, unsigned Index) { } void CXXNameMangler::mangleSeqID(unsigned SeqID) { - if (SeqID == 1) + if (SeqID == 0) { + // Nothing. + } else if (SeqID == 1) { Out << '0'; - else if (SeqID > 1) { + } else { SeqID--; // <seq-id> is encoded in base-36, using digits and upper case letters. @@ -5907,6 +6740,14 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) { return mangleSubstitution(reinterpret_cast<uintptr_t>(ND)); } +bool CXXNameMangler::mangleSubstitution(NestedNameSpecifier *NNS) { + assert(NNS->getKind() == NestedNameSpecifier::Identifier && + "mangleSubstitution(NestedNameSpecifier *) is only used for " + "identifier nested name specifiers."); + NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS); + return mangleSubstitution(reinterpret_cast<uintptr_t>(NNS)); +} + /// Determine whether the given type has any qualifiers that are relevant for /// substitutions. static bool hasMangledSubstitutionQualifiers(QualType T) { @@ -5946,56 +6787,67 @@ bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) { return true; } -static bool isCharType(QualType T) { - if (T.isNull()) - return false; - - return T->isSpecificBuiltinType(BuiltinType::Char_S) || - T->isSpecificBuiltinType(BuiltinType::Char_U); -} - -/// Returns whether a given type is a template specialization of a given name -/// with a single argument of type char. -static bool isCharSpecialization(QualType T, const char *Name) { - if (T.isNull()) +/// Returns whether S is a template specialization of std::Name with a single +/// argument of type A. 
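For orientation, the numbering produced by mangleSeqID and mangleTemplateParameter above, together with a classic substitution example:

    void f(int *, int *);   // _Z1fPiS_ : the second 'int *' is substitution S_
    // Back-references continue in base 36: S_, S0_, S1_, ..., S9_, SA_, ...,
    // SZ_, S10_, ...
    // Template parameters mangle as T_, T0_, T1_, ... ; a nonzero level adds
    // an L prefix, e.g. TL0__ when Depth is 1 and Index is 0.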
+bool CXXNameMangler::isSpecializedAs(QualType S, llvm::StringRef Name, + QualType A) { + if (S.isNull()) return false; - const RecordType *RT = T->getAs<RecordType>(); + const RecordType *RT = S->getAs<RecordType>(); if (!RT) return false; const ClassTemplateSpecializationDecl *SD = dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl()); - if (!SD) + if (!SD || !SD->getIdentifier()->isStr(Name)) return false; - if (!isStdNamespace(getEffectiveDeclContext(SD))) + if (!isStdNamespace(Context.getEffectiveDeclContext(SD))) return false; const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); if (TemplateArgs.size() != 1) return false; - if (!isCharType(TemplateArgs[0].getAsType())) + if (TemplateArgs[0].getAsType() != A) return false; - return SD->getIdentifier()->getName() == Name; + if (SD->getSpecializedTemplate()->getOwningModuleForLinkage()) + return false; + + return true; } -template <std::size_t StrLen> -static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD, - const char (&Str)[StrLen]) { - if (!SD->getIdentifier()->isStr(Str)) +/// Returns whether SD is a template specialization std::Name<char, +/// std::char_traits<char> [, std::allocator<char>]> +/// HasAllocator controls whether the 3rd template argument is needed. +bool CXXNameMangler::isStdCharSpecialization( + const ClassTemplateSpecializationDecl *SD, llvm::StringRef Name, + bool HasAllocator) { + if (!SD->getIdentifier()->isStr(Name)) return false; const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); - if (TemplateArgs.size() != 2) + if (TemplateArgs.size() != (HasAllocator ? 3 : 2)) + return false; + + QualType A = TemplateArgs[0].getAsType(); + if (A.isNull()) + return false; + // Plain 'char' is named Char_S or Char_U depending on the target ABI. 
+ if (!A->isSpecificBuiltinType(BuiltinType::Char_S) && + !A->isSpecificBuiltinType(BuiltinType::Char_U)) + return false; + + if (!isSpecializedAs(TemplateArgs[1].getAsType(), "char_traits", A)) return false; - if (!isCharType(TemplateArgs[0].getAsType())) + if (HasAllocator && + !isSpecializedAs(TemplateArgs[2].getAsType(), "allocator", A)) return false; - if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits")) + if (SD->getSpecializedTemplate()->getOwningModuleForLinkage()) return false; return true; @@ -6008,10 +6860,14 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) { Out << "St"; return true; } + return false; } if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) { - if (!isStdNamespace(getEffectiveDeclContext(TD))) + if (!isStdNamespace(Context.getEffectiveDeclContext(TD))) + return false; + + if (TD->getOwningModuleForLinkage()) return false; // <substitution> ::= Sa # ::std::allocator @@ -6025,56 +6881,48 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) { Out << "Sb"; return true; } + return false; } if (const ClassTemplateSpecializationDecl *SD = dyn_cast<ClassTemplateSpecializationDecl>(ND)) { - if (!isStdNamespace(getEffectiveDeclContext(SD))) + if (!isStdNamespace(Context.getEffectiveDeclContext(SD))) + return false; + + if (SD->getSpecializedTemplate()->getOwningModuleForLinkage()) return false; // <substitution> ::= Ss # ::std::basic_string<char, // ::std::char_traits<char>, // ::std::allocator<char> > - if (SD->getIdentifier()->isStr("basic_string")) { - const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs(); - - if (TemplateArgs.size() != 3) - return false; - - if (!isCharType(TemplateArgs[0].getAsType())) - return false; - - if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits")) - return false; - - if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator")) - return false; - + if (isStdCharSpecialization(SD, "basic_string", /*HasAllocator=*/true)) { Out << "Ss"; return true; } // <substitution> ::= Si # ::std::basic_istream<char, // ::std::char_traits<char> > - if (isStreamCharSpecialization(SD, "basic_istream")) { + if (isStdCharSpecialization(SD, "basic_istream", /*HasAllocator=*/false)) { Out << "Si"; return true; } // <substitution> ::= So # ::std::basic_ostream<char, // ::std::char_traits<char> > - if (isStreamCharSpecialization(SD, "basic_ostream")) { + if (isStdCharSpecialization(SD, "basic_ostream", /*HasAllocator=*/false)) { Out << "So"; return true; } // <substitution> ::= Sd # ::std::basic_iostream<char, // ::std::char_traits<char> > - if (isStreamCharSpecialization(SD, "basic_iostream")) { + if (isStdCharSpecialization(SD, "basic_iostream", /*HasAllocator=*/false)) { Out << "Sd"; return true; } + return false; } + return false; } @@ -6203,8 +7051,78 @@ void ItaniumMangleContextImpl::mangleCXXDtorComdat(const CXXDestructorDecl *D, Mangler.mangle(GlobalDecl(D, Dtor_Comdat)); } +/// Mangles the pointer authentication override attribute for classes +/// that have explicit overrides for the vtable authentication schema. +/// +/// The override is mangled as a parameterized vendor extension as follows +/// +/// <type> ::= U "__vtptrauth" I +/// <key> +/// <addressDiscriminated> +/// <extraDiscriminator> +/// E +/// +/// The extra discriminator encodes the explicit value derived from the +/// override schema, e.g. if the override has specified type based +/// discrimination the encoded value will be the discriminator derived from the +/// type name. 
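The effect of these abbreviations, sketched. They only fire when the relevant specializations live directly in namespace std (so not for libraries that wrap them in an inline versioning namespace) and, per the checks above, not when the declaration is attached to a named module.

    #include <string>
    #include <ostream>
    void f(const std::string &);   // roughly _Z1fRKSs  (Ss = the char basic_string)
    void g(std::ostream &);        // roughly _Z1gRSo   (So = the char basic_ostream)
    // Other single-letter forms: St = ::std, Sa = ::std::allocator,
    // Sb = ::std::basic_string, Si / Sd = the char basic_istream / basic_iostream.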
+static void mangleOverrideDiscrimination(CXXNameMangler &Mangler, + ASTContext &Context, + const ThunkInfo &Thunk) { + auto &LangOpts = Context.getLangOpts(); + const CXXRecordDecl *ThisRD = Thunk.ThisType->getPointeeCXXRecordDecl(); + const CXXRecordDecl *PtrauthClassRD = + Context.baseForVTableAuthentication(ThisRD); + unsigned TypedDiscriminator = + Context.getPointerAuthVTablePointerDiscriminator(ThisRD); + Mangler.mangleVendorQualifier("__vtptrauth"); + auto &ManglerStream = Mangler.getStream(); + ManglerStream << "I"; + if (const auto *ExplicitAuth = + PtrauthClassRD->getAttr<VTablePointerAuthenticationAttr>()) { + ManglerStream << "Lj" << ExplicitAuth->getKey(); + + if (ExplicitAuth->getAddressDiscrimination() == + VTablePointerAuthenticationAttr::DefaultAddressDiscrimination) + ManglerStream << "Lb" << LangOpts.PointerAuthVTPtrAddressDiscrimination; + else + ManglerStream << "Lb" + << (ExplicitAuth->getAddressDiscrimination() == + VTablePointerAuthenticationAttr::AddressDiscrimination); + + switch (ExplicitAuth->getExtraDiscrimination()) { + case VTablePointerAuthenticationAttr::DefaultExtraDiscrimination: { + if (LangOpts.PointerAuthVTPtrTypeDiscrimination) + ManglerStream << "Lj" << TypedDiscriminator; + else + ManglerStream << "Lj" << 0; + break; + } + case VTablePointerAuthenticationAttr::TypeDiscrimination: + ManglerStream << "Lj" << TypedDiscriminator; + break; + case VTablePointerAuthenticationAttr::CustomDiscrimination: + ManglerStream << "Lj" << ExplicitAuth->getCustomDiscriminationValue(); + break; + case VTablePointerAuthenticationAttr::NoExtraDiscrimination: + ManglerStream << "Lj" << 0; + break; + } + } else { + ManglerStream << "Lj" + << (unsigned)VTablePointerAuthenticationAttr::DefaultKey; + ManglerStream << "Lb" << LangOpts.PointerAuthVTPtrAddressDiscrimination; + if (LangOpts.PointerAuthVTPtrTypeDiscrimination) + ManglerStream << "Lj" << TypedDiscriminator; + else + ManglerStream << "Lj" << 0; + } + ManglerStream << "E"; +} + void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk, + bool ElideOverrideInfo, raw_ostream &Out) { // <special-name> ::= T <call-offset> <base encoding> // # base is the nominal target function of thunk @@ -6230,21 +7148,28 @@ void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD, Thunk.Return.Virtual.Itanium.VBaseOffsetOffset); Mangler.mangleFunctionEncoding(MD); + if (!ElideOverrideInfo) + mangleOverrideDiscrimination(Mangler, getASTContext(), Thunk); } -void ItaniumMangleContextImpl::mangleCXXDtorThunk( - const CXXDestructorDecl *DD, CXXDtorType Type, - const ThisAdjustment &ThisAdjustment, raw_ostream &Out) { +void ItaniumMangleContextImpl::mangleCXXDtorThunk(const CXXDestructorDecl *DD, + CXXDtorType Type, + const ThunkInfo &Thunk, + bool ElideOverrideInfo, + raw_ostream &Out) { // <special-name> ::= T <call-offset> <base encoding> // # base is the nominal target function of thunk CXXNameMangler Mangler(*this, Out, DD, Type); Mangler.getStream() << "_ZT"; + auto &ThisAdjustment = Thunk.This; // Mangle the 'this' pointer adjustment. Mangler.mangleCallOffset(ThisAdjustment.NonVirtual, ThisAdjustment.Virtual.Itanium.VCallOffsetOffset); Mangler.mangleFunctionEncoding(GlobalDecl(DD, Type)); + if (!ElideOverrideInfo) + mangleOverrideDiscrimination(Mangler, getASTContext(), Thunk); } /// Returns the mangled name for a guard variable for the passed in VarDecl. 
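A sketch of what the thunk manglings produced here look like; the exact offset depends on the target's layout, so the -8 below simply assumes a typical 64-bit layout where the second base starts after one vtable pointer.

    struct A { virtual void foo(); };
    struct B { virtual void bar(); };
    struct D : A, B { void foo() override; void bar() override; };
    // Calling D::bar() through a B* needs a this-adjusting thunk, mangled
    // along the lines of  _ZThn8_N1D3barEv  ("h" = non-virtual adjustment,
    // "n8" = offset -8). Covariant-return thunks use the _ZTc form with two
    // <call-offset>s, and the __vtptrauth qualifier above is appended when
    // override information is not elided.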
@@ -6291,23 +7216,25 @@ void ItaniumMangleContextImpl::mangleDynamicStermFinalizer(const VarDecl *D, } void ItaniumMangleContextImpl::mangleSEHFilterExpression( - const NamedDecl *EnclosingDecl, raw_ostream &Out) { + GlobalDecl EnclosingDecl, raw_ostream &Out) { CXXNameMangler Mangler(*this, Out); Mangler.getStream() << "__filt_"; - if (shouldMangleDeclName(EnclosingDecl)) + auto *EnclosingFD = cast<FunctionDecl>(EnclosingDecl.getDecl()); + if (shouldMangleDeclName(EnclosingFD)) Mangler.mangle(EnclosingDecl); else - Mangler.getStream() << EnclosingDecl->getName(); + Mangler.getStream() << EnclosingFD->getName(); } void ItaniumMangleContextImpl::mangleSEHFinallyBlock( - const NamedDecl *EnclosingDecl, raw_ostream &Out) { + GlobalDecl EnclosingDecl, raw_ostream &Out) { CXXNameMangler Mangler(*this, Out); Mangler.getStream() << "__fin_"; - if (shouldMangleDeclName(EnclosingDecl)) + auto *EnclosingFD = cast<FunctionDecl>(EnclosingDecl.getDecl()); + if (shouldMangleDeclName(EnclosingFD)) Mangler.mangle(EnclosingDecl); else - Mangler.getStream() << EnclosingDecl->getName(); + Mangler.getStream() << EnclosingFD->getName(); } void ItaniumMangleContextImpl::mangleItaniumThreadLocalInit(const VarDecl *D, @@ -6376,16 +7303,17 @@ void ItaniumMangleContextImpl::mangleCXXRTTI(QualType Ty, raw_ostream &Out) { Mangler.mangleType(Ty); } -void ItaniumMangleContextImpl::mangleCXXRTTIName(QualType Ty, - raw_ostream &Out) { +void ItaniumMangleContextImpl::mangleCXXRTTIName( + QualType Ty, raw_ostream &Out, bool NormalizeIntegers = false) { // <special-name> ::= TS <type> # typeinfo name (null terminated byte string) - CXXNameMangler Mangler(*this, Out); + CXXNameMangler Mangler(*this, Out, NormalizeIntegers); Mangler.getStream() << "_ZTS"; Mangler.mangleType(Ty); } -void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) { - mangleCXXRTTIName(Ty, Out); +void ItaniumMangleContextImpl::mangleCanonicalTypeName( + QualType Ty, raw_ostream &Out, bool NormalizeIntegers = false) { + mangleCXXRTTIName(Ty, Out, NormalizeIntegers); } void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *, raw_ostream &) { @@ -6398,17 +7326,36 @@ void ItaniumMangleContextImpl::mangleLambdaSig(const CXXRecordDecl *Lambda, Mangler.mangleLambdaSig(Lambda); } +void ItaniumMangleContextImpl::mangleModuleInitializer(const Module *M, + raw_ostream &Out) { + // <special-name> ::= GI <module-name> # module initializer function + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_ZGI"; + Mangler.mangleModuleNamePrefix(M->getPrimaryModuleInterfaceName()); + if (M->isModulePartition()) { + // The partition needs including, as partitions can have them too. 
+ auto Partition = M->Name.find(':'); + Mangler.mangleModuleNamePrefix( + StringRef(&M->Name[Partition + 1], M->Name.size() - Partition - 1), + /*IsPartition*/ true); + } +} + ItaniumMangleContext *ItaniumMangleContext::create(ASTContext &Context, - DiagnosticsEngine &Diags) { + DiagnosticsEngine &Diags, + bool IsAux) { return new ItaniumMangleContextImpl( Context, Diags, - [](ASTContext &, const NamedDecl *) -> llvm::Optional<unsigned> { - return llvm::None; - }); + [](ASTContext &, const NamedDecl *) -> std::optional<unsigned> { + return std::nullopt; + }, + IsAux); } ItaniumMangleContext * ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags, - DiscriminatorOverrideTy DiscriminatorOverride) { - return new ItaniumMangleContextImpl(Context, Diags, DiscriminatorOverride); + DiscriminatorOverrideTy DiscriminatorOverride, + bool IsAux) { + return new ItaniumMangleContextImpl(Context, Diags, DiscriminatorOverride, + IsAux); } diff --git a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp index f09f9d38759f..eeb314b8d32b 100644 --- a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp +++ b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp @@ -1,8 +1,10 @@ #include "clang/AST/JSONNodeDumper.h" +#include "clang/AST/Type.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/Lexer.h" -#include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/StringExtras.h" +#include <optional> using namespace clang; @@ -75,7 +77,7 @@ void JSONNodeDumper::Visit(const Type *T) { return; JOS.attribute("kind", (llvm::Twine(T->getTypeClassName()) + "Type").str()); - JOS.attribute("type", createQualType(QualType(T, 0), /*Desugar*/ false)); + JOS.attribute("type", createQualType(QualType(T, 0), /*Desugar=*/false)); attributeOnlyIfTrue("containsErrors", T->containsErrors()); attributeOnlyIfTrue("isDependent", T->isDependentType()); attributeOnlyIfTrue("isInstantiationDependent", @@ -94,6 +96,21 @@ void JSONNodeDumper::Visit(QualType T) { JOS.attribute("qualifiers", T.split().Quals.getAsString()); } +void JSONNodeDumper::Visit(TypeLoc TL) { + if (TL.isNull()) + return; + JOS.attribute("kind", + (llvm::Twine(TL.getTypeLocClass() == TypeLoc::Qualified + ? 
"Qualified" + : TL.getTypePtr()->getTypeClassName()) + + "TypeLoc") + .str()); + JOS.attribute("type", + createQualType(QualType(TL.getType()), /*Desugar=*/false)); + JOS.attributeObject("range", + [TL, this] { writeSourceRange(TL.getSourceRange()); }); +} + void JSONNodeDumper::Visit(const Decl *D) { JOS.attribute("id", createPointerRepresentation(D)); @@ -170,6 +187,8 @@ void JSONNodeDumper::Visit(const CXXCtorInitializer *Init) { llvm_unreachable("Unknown initializer type"); } +void JSONNodeDumper::Visit(const OpenACCClause *C) {} + void JSONNodeDumper::Visit(const OMPClause *C) {} void JSONNodeDumper::Visit(const BlockDecl::Capture &C) { @@ -218,7 +237,23 @@ void JSONNodeDumper::Visit(const APValue &Value, QualType Ty) { std::string Str; llvm::raw_string_ostream OS(Str); Value.printPretty(OS, Ctx, Ty); - JOS.attribute("value", OS.str()); + JOS.attribute("value", Str); +} + +void JSONNodeDumper::Visit(const ConceptReference *CR) { + JOS.attribute("kind", "ConceptReference"); + JOS.attribute("id", createPointerRepresentation(CR->getNamedConcept())); + if (const auto *Args = CR->getTemplateArgsAsWritten()) { + JOS.attributeArray("templateArgsAsWritten", [Args, this] { + for (const TemplateArgumentLoc &TAL : Args->arguments()) + JOS.object( + [&TAL, this] { Visit(TAL.getArgument(), TAL.getSourceRange()); }); + }); + } + JOS.attributeObject("loc", + [CR, this] { writeSourceLocation(CR->getLocation()); }); + JOS.attributeObject("range", + [CR, this] { writeSourceRange(CR->getSourceRange()); }); } void JSONNodeDumper::writeIncludeStack(PresumedLoc Loc, bool JustFirst) { @@ -313,12 +348,16 @@ std::string JSONNodeDumper::createPointerRepresentation(const void *Ptr) { llvm::json::Object JSONNodeDumper::createQualType(QualType QT, bool Desugar) { SplitQualType SQT = QT.split(); - llvm::json::Object Ret{{"qualType", QualType::getAsString(SQT, PrintPolicy)}}; + std::string SQTS = QualType::getAsString(SQT, PrintPolicy); + llvm::json::Object Ret{{"qualType", SQTS}}; if (Desugar && !QT.isNull()) { SplitQualType DSQT = QT.getSplitDesugaredType(); - if (DSQT != SQT) - Ret["desugaredQualType"] = QualType::getAsString(DSQT, PrintPolicy); + if (DSQT != SQT) { + std::string DSQTS = QualType::getAsString(DSQT, PrintPolicy); + if (DSQTS != SQTS) + Ret["desugaredQualType"] = DSQTS; + } if (const auto *TT = QT->getAs<TypedefType>()) Ret["typeAliasDeclId"] = createPointerRepresentation(TT->getDecl()); } @@ -528,8 +567,49 @@ JSONNodeDumper::createCXXBaseSpecifier(const CXXBaseSpecifier &BS) { return Ret; } +void JSONNodeDumper::VisitAliasAttr(const AliasAttr *AA) { + JOS.attribute("aliasee", AA->getAliasee()); +} + +void JSONNodeDumper::VisitCleanupAttr(const CleanupAttr *CA) { + JOS.attribute("cleanup_function", createBareDeclRef(CA->getFunctionDecl())); +} + +void JSONNodeDumper::VisitDeprecatedAttr(const DeprecatedAttr *DA) { + if (!DA->getMessage().empty()) + JOS.attribute("message", DA->getMessage()); + if (!DA->getReplacement().empty()) + JOS.attribute("replacement", DA->getReplacement()); +} + +void JSONNodeDumper::VisitUnavailableAttr(const UnavailableAttr *UA) { + if (!UA->getMessage().empty()) + JOS.attribute("message", UA->getMessage()); +} + +void JSONNodeDumper::VisitSectionAttr(const SectionAttr *SA) { + JOS.attribute("section_name", SA->getName()); +} + +void JSONNodeDumper::VisitVisibilityAttr(const VisibilityAttr *VA) { + JOS.attribute("visibility", VisibilityAttr::ConvertVisibilityTypeToStr( + VA->getVisibility())); +} + +void JSONNodeDumper::VisitTLSModelAttr(const TLSModelAttr *TA) { + 
JOS.attribute("tls_model", TA->getModel()); +} + void JSONNodeDumper::VisitTypedefType(const TypedefType *TT) { JOS.attribute("decl", createBareDeclRef(TT->getDecl())); + if (!TT->typeMatchesDecl()) + JOS.attribute("type", createQualType(TT->desugar())); +} + +void JSONNodeDumper::VisitUsingType(const UsingType *TT) { + JOS.attribute("decl", createBareDeclRef(TT->getFoundDecl())); + if (!TT->typeMatchesDecl()) + JOS.attribute("type", createQualType(TT->desugar())); } void JSONNodeDumper::VisitFunctionType(const FunctionType *T) { @@ -599,13 +679,13 @@ void JSONNodeDumper::VisitRValueReferenceType(const ReferenceType *RT) { void JSONNodeDumper::VisitArrayType(const ArrayType *AT) { switch (AT->getSizeModifier()) { - case ArrayType::Star: + case ArraySizeModifier::Star: JOS.attribute("sizeModifier", "*"); break; - case ArrayType::Static: + case ArraySizeModifier::Static: JOS.attribute("sizeModifier", "static"); break; - case ArrayType::Normal: + case ArraySizeModifier::Normal: break; } @@ -617,7 +697,7 @@ void JSONNodeDumper::VisitArrayType(const ArrayType *AT) { void JSONNodeDumper::VisitConstantArrayType(const ConstantArrayType *CAT) { // FIXME: this should use ZExt instead of SExt, but JSON doesn't allow a // narrowing conversion to int64_t so it cannot be expressed. - JOS.attribute("size", CAT->getSize().getSExtValue()); + JOS.attribute("size", CAT->getSExtSize()); VisitArrayType(CAT); } @@ -630,29 +710,35 @@ void JSONNodeDumper::VisitDependentSizedExtVectorType( void JSONNodeDumper::VisitVectorType(const VectorType *VT) { JOS.attribute("numElements", VT->getNumElements()); switch (VT->getVectorKind()) { - case VectorType::GenericVector: + case VectorKind::Generic: break; - case VectorType::AltiVecVector: + case VectorKind::AltiVecVector: JOS.attribute("vectorKind", "altivec"); break; - case VectorType::AltiVecPixel: + case VectorKind::AltiVecPixel: JOS.attribute("vectorKind", "altivec pixel"); break; - case VectorType::AltiVecBool: + case VectorKind::AltiVecBool: JOS.attribute("vectorKind", "altivec bool"); break; - case VectorType::NeonVector: + case VectorKind::Neon: JOS.attribute("vectorKind", "neon"); break; - case VectorType::NeonPolyVector: + case VectorKind::NeonPoly: JOS.attribute("vectorKind", "neon poly"); break; - case VectorType::SveFixedLengthDataVector: + case VectorKind::SveFixedLengthData: JOS.attribute("vectorKind", "fixed-length sve data vector"); break; - case VectorType::SveFixedLengthPredicateVector: + case VectorKind::SveFixedLengthPredicate: JOS.attribute("vectorKind", "fixed-length sve predicate vector"); break; + case VectorKind::RVVFixedLengthData: + JOS.attribute("vectorKind", "fixed-length rvv data vector"); + break; + case VectorKind::RVVFixedLengthMask: + JOS.attribute("vectorKind", "fixed-length rvv mask vector"); + break; } } @@ -662,9 +748,11 @@ void JSONNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *UUT) { void JSONNodeDumper::VisitUnaryTransformType(const UnaryTransformType *UTT) { switch (UTT->getUTTKind()) { - case UnaryTransformType::EnumUnderlyingType: - JOS.attribute("transformKind", "underlying_type"); +#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \ + case UnaryTransformType::Enum: \ + JOS.attribute("transformKind", #Trait); \ break; +#include "clang/Basic/TransformTypeTraits.def" } } @@ -680,6 +768,18 @@ void JSONNodeDumper::VisitTemplateTypeParmType( JOS.attribute("decl", createBareDeclRef(TTPT->getDecl())); } +void JSONNodeDumper::VisitSubstTemplateTypeParmType( + const SubstTemplateTypeParmType *STTPT) { + 
JOS.attribute("index", STTPT->getIndex()); + if (auto PackIndex = STTPT->getPackIndex()) + JOS.attribute("pack_index", *PackIndex); +} + +void JSONNodeDumper::VisitSubstTemplateTypeParmPackType( + const SubstTemplateTypeParmPackType *T) { + JOS.attribute("index", T->getIndex()); +} + void JSONNodeDumper::VisitAutoType(const AutoType *AT) { JOS.attribute("undeduced", !AT->isDeduced()); switch (AT->getKeyword()) { @@ -702,7 +802,7 @@ void JSONNodeDumper::VisitTemplateSpecializationType( std::string Str; llvm::raw_string_ostream OS(Str); TST->getTemplateName().print(OS, PrintPolicy); - JOS.attribute("templateName", OS.str()); + JOS.attribute("templateName", Str); } void JSONNodeDumper::VisitInjectedClassNameType( @@ -715,7 +815,7 @@ void JSONNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *OIT) { } void JSONNodeDumper::VisitPackExpansionType(const PackExpansionType *PET) { - if (llvm::Optional<unsigned> N = PET->getNumExpansions()) + if (std::optional<unsigned> N = PET->getNumExpansions()) JOS.attribute("numExpansions", *N); } @@ -724,7 +824,7 @@ void JSONNodeDumper::VisitElaboratedType(const ElaboratedType *ET) { std::string Str; llvm::raw_string_ostream OS(Str); NNS->print(OS, PrintPolicy, /*ResolveTemplateArgs*/ true); - JOS.attribute("qualifier", OS.str()); + JOS.attribute("qualifier", Str); } if (const TagDecl *TD = ET->getOwnedTagDecl()) JOS.attribute("ownedTagDecl", createBareDeclRef(TD)); @@ -744,11 +844,28 @@ void JSONNodeDumper::VisitNamedDecl(const NamedDecl *ND) { JOS.attribute("name", ND->getNameAsString()); // FIXME: There are likely other contexts in which it makes no sense to ask // for a mangled name. - if (!isa<RequiresExprBodyDecl>(ND->getDeclContext())) { - std::string MangledName = ASTNameGen.getName(ND); - if (!MangledName.empty()) - JOS.attribute("mangledName", MangledName); - } + if (isa<RequiresExprBodyDecl>(ND->getDeclContext())) + return; + + // If the declaration is dependent or is in a dependent context, then the + // mangling is unlikely to be meaningful (and in some cases may cause + // "don't know how to mangle this" assertion failures. + if (ND->isTemplated()) + return; + + // Mangled names are not meaningful for locals, and may not be well-defined + // in the case of VLAs. + auto *VD = dyn_cast<VarDecl>(ND); + if (VD && VD->hasLocalStorage()) + return; + + // Do not mangle template deduction guides. 
+ if (isa<CXXDeductionGuideDecl>(ND)) + return; + + std::string MangledName = ASTNameGen.getName(ND); + if (!MangledName.empty()) + JOS.attribute("mangledName", MangledName); } } @@ -765,9 +882,9 @@ void JSONNodeDumper::VisitTypeAliasDecl(const TypeAliasDecl *TAD) { void JSONNodeDumper::VisitNamespaceDecl(const NamespaceDecl *ND) { VisitNamedDecl(ND); attributeOnlyIfTrue("isInline", ND->isInline()); - if (!ND->isOriginalNamespace()) - JOS.attribute("originalNamespace", - createBareDeclRef(ND->getOriginalNamespace())); + attributeOnlyIfTrue("isNested", ND->isNested()); + if (!ND->isFirstDecl()) + JOS.attribute("originalNamespace", createBareDeclRef(ND->getFirstDecl())); } void JSONNodeDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *UDD) { @@ -802,6 +919,9 @@ void JSONNodeDumper::VisitUsingShadowDecl(const UsingShadowDecl *USD) { void JSONNodeDumper::VisitVarDecl(const VarDecl *VD) { VisitNamedDecl(VD); JOS.attribute("type", createQualType(VD->getType())); + if (const auto *P = dyn_cast<ParmVarDecl>(VD)) + attributeOnlyIfTrue("explicitObjectParameter", + P->isExplicitObjectParameter()); StorageClass SC = VD->getStorageClass(); if (SC != SC_None) @@ -820,6 +940,9 @@ void JSONNodeDumper::VisitVarDecl(const VarDecl *VD) { case VarDecl::CInit: JOS.attribute("init", "c"); break; case VarDecl::CallInit: JOS.attribute("init", "call"); break; case VarDecl::ListInit: JOS.attribute("init", "list"); break; + case VarDecl::ParenListInit: + JOS.attribute("init", "paren-list"); + break; } } attributeOnlyIfTrue("isParameterPack", VD->isParameterPack()); @@ -842,14 +965,18 @@ void JSONNodeDumper::VisitFunctionDecl(const FunctionDecl *FD) { JOS.attribute("storageClass", VarDecl::getStorageClassSpecifierString(SC)); attributeOnlyIfTrue("inline", FD->isInlineSpecified()); attributeOnlyIfTrue("virtual", FD->isVirtualAsWritten()); - attributeOnlyIfTrue("pure", FD->isPure()); + attributeOnlyIfTrue("pure", FD->isPureVirtual()); attributeOnlyIfTrue("explicitlyDeleted", FD->isDeletedAsWritten()); attributeOnlyIfTrue("constexpr", FD->isConstexpr()); attributeOnlyIfTrue("variadic", FD->isVariadic()); + attributeOnlyIfTrue("immediate", FD->isImmediateFunction()); if (FD->isDefaulted()) JOS.attribute("explicitlyDefaulted", FD->isDeleted() ? "deleted" : "default"); + + if (StringLiteral *Msg = FD->getDeletedMessage()) + JOS.attribute("deletedMessage", Msg->getString()); } void JSONNodeDumper::VisitEnumDecl(const EnumDecl *ED) { @@ -886,6 +1013,11 @@ void JSONNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *RD) { } } +void JSONNodeDumper::VisitHLSLBufferDecl(const HLSLBufferDecl *D) { + VisitNamedDecl(D); + JOS.attribute("bufferKind", D->isCBuffer() ? "cbuffer" : "tbuffer"); +} + void JSONNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) { VisitNamedDecl(D); JOS.attribute("tagUsed", D->wasDeclaredWithTypename() ? "typename" : "class"); @@ -895,7 +1027,7 @@ void JSONNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) { if (D->hasDefaultArgument()) JOS.attributeObject("defaultArg", [=] { - Visit(D->getDefaultArgument(), SourceRange(), + Visit(D->getDefaultArgument().getArgument(), SourceRange(), D->getDefaultArgStorage().getInheritedFrom(), D->defaultArgumentWasInherited() ? 
"inherited from" : "previous"); }); @@ -911,7 +1043,7 @@ void JSONNodeDumper::VisitNonTypeTemplateParmDecl( if (D->hasDefaultArgument()) JOS.attributeObject("defaultArg", [=] { - Visit(D->getDefaultArgument(), SourceRange(), + Visit(D->getDefaultArgument().getArgument(), SourceRange(), D->getDefaultArgStorage().getInheritedFrom(), D->defaultArgumentWasInherited() ? "inherited from" : "previous"); }); @@ -937,8 +1069,12 @@ void JSONNodeDumper::VisitTemplateTemplateParmDecl( void JSONNodeDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *LSD) { StringRef Lang; switch (LSD->getLanguage()) { - case LinkageSpecDecl::lang_c: Lang = "C"; break; - case LinkageSpecDecl::lang_cxx: Lang = "C++"; break; + case LinkageSpecLanguageIDs::C: + Lang = "C"; + break; + case LinkageSpecLanguageIDs::CXX: + Lang = "C++"; + break; } JOS.attribute("language", Lang); attributeOnlyIfTrue("hasBraces", LSD->hasBraces()); @@ -1096,6 +1232,10 @@ void JSONNodeDumper::VisitBlockDecl(const BlockDecl *D) { attributeOnlyIfTrue("capturesThis", D->capturesCXXThis()); } +void JSONNodeDumper::VisitAtomicExpr(const AtomicExpr *AE) { + JOS.attribute("name", AE->getOpAsString()); +} + void JSONNodeDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *OEE) { JOS.attribute("encodedType", createQualType(OEE->getEncodedType())); } @@ -1105,7 +1245,7 @@ void JSONNodeDumper::VisitObjCMessageExpr(const ObjCMessageExpr *OME) { llvm::raw_string_ostream OS(Str); OME->getSelector().print(OS); - JOS.attribute("selector", OS.str()); + JOS.attribute("selector", Str); switch (OME->getReceiverKind()) { case ObjCMessageExpr::Instance: @@ -1136,7 +1276,7 @@ void JSONNodeDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *OBE) { llvm::raw_string_ostream OS(Str); MD->getSelector().print(OS); - JOS.attribute("selector", OS.str()); + JOS.attribute("selector", Str); } } @@ -1145,7 +1285,7 @@ void JSONNodeDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *OSE) { llvm::raw_string_ostream OS(Str); OSE->getSelector().print(OS); - JOS.attribute("selector", OS.str()); + JOS.attribute("selector", Str); } void JSONNodeDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *OPE) { @@ -1201,6 +1341,7 @@ void JSONNodeDumper::VisitDeclRefExpr(const DeclRefExpr *DRE) { case NOUR_Constant: JOS.attribute("nonOdrUseReason", "constant"); break; case NOUR_Discarded: JOS.attribute("nonOdrUseReason", "discarded"); break; } + attributeOnlyIfTrue("isImmediateEscalating", DRE->isImmediateEscalating()); } void JSONNodeDumper::VisitSYCLUniqueStableNameExpr( @@ -1252,9 +1393,14 @@ void JSONNodeDumper::VisitCXXNewExpr(const CXXNewExpr *NE) { attributeOnlyIfTrue("isArray", NE->isArray()); attributeOnlyIfTrue("isPlacement", NE->getNumPlacementArgs() != 0); switch (NE->getInitializationStyle()) { - case CXXNewExpr::NoInit: break; - case CXXNewExpr::CallInit: JOS.attribute("initStyle", "call"); break; - case CXXNewExpr::ListInit: JOS.attribute("initStyle", "list"); break; + case CXXNewInitializationStyle::None: + break; + case CXXNewInitializationStyle::Parens: + JOS.attribute("initStyle", "call"); + break; + case CXXNewInitializationStyle::Braces: + JOS.attribute("initStyle", "list"); + break; } if (const FunctionDecl *FD = NE->getOperatorNew()) JOS.attribute("operatorNewDecl", createBareDeclRef(FD)); @@ -1360,18 +1506,19 @@ void JSONNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *CE) { attributeOnlyIfTrue("initializer_list", CE->isStdInitListInitialization()); attributeOnlyIfTrue("zeroing", CE->requiresZeroInitialization()); attributeOnlyIfTrue("hadMultipleCandidates", 
CE->hadMultipleCandidates()); + attributeOnlyIfTrue("isImmediateEscalating", CE->isImmediateEscalating()); switch (CE->getConstructionKind()) { - case CXXConstructExpr::CK_Complete: + case CXXConstructionKind::Complete: JOS.attribute("constructionKind", "complete"); break; - case CXXConstructExpr::CK_Delegating: + case CXXConstructionKind::Delegating: JOS.attribute("constructionKind", "delegating"); break; - case CXXConstructExpr::CK_NonVirtualBase: + case CXXConstructionKind::NonVirtualBase: JOS.attribute("constructionKind", "non-virtual base"); break; - case CXXConstructExpr::CK_VirtualBase: + case CXXConstructionKind::VirtualBase: JOS.attribute("constructionKind", "virtual base"); break; } @@ -1431,6 +1578,14 @@ void JSONNodeDumper::VisitMaterializeTemporaryExpr( attributeOnlyIfTrue("boundToLValueRef", MTE->isBoundToLvalueReference()); } +void JSONNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) { + attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit()); +} + +void JSONNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) { + attributeOnlyIfTrue("hasRewrittenInit", Node->hasRewrittenInit()); +} + void JSONNodeDumper::VisitCXXDependentScopeMemberExpr( const CXXDependentScopeMemberExpr *DSME) { JOS.attribute("isArrow", DSME->isArrow()); @@ -1478,7 +1633,7 @@ void JSONNodeDumper::VisitStringLiteral(const StringLiteral *SL) { std::string Buffer; llvm::raw_string_ostream SS(Buffer); SL->outputString(SS); - JOS.attribute("value", SS.str()); + JOS.attribute("value", Buffer); } void JSONNodeDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *BLE) { JOS.attribute("value", BLE->getValue()); @@ -1489,6 +1644,8 @@ void JSONNodeDumper::VisitIfStmt(const IfStmt *IS) { attributeOnlyIfTrue("hasVar", IS->hasVarStorage()); attributeOnlyIfTrue("hasElse", IS->hasElseStorage()); attributeOnlyIfTrue("isConstexpr", IS->isConstexpr()); + attributeOnlyIfTrue("isConsteval", IS->isConsteval()); + attributeOnlyIfTrue("constevalIsNegated", IS->isNegatedConsteval()); } void JSONNodeDumper::VisitSwitchStmt(const SwitchStmt *SS) { @@ -1572,19 +1729,19 @@ void JSONNodeDumper::visitInlineCommandComment( JOS.attribute("name", getCommentCommandName(C->getCommandID())); switch (C->getRenderKind()) { - case comments::InlineCommandComment::RenderNormal: + case comments::InlineCommandRenderKind::Normal: JOS.attribute("renderKind", "normal"); break; - case comments::InlineCommandComment::RenderBold: + case comments::InlineCommandRenderKind::Bold: JOS.attribute("renderKind", "bold"); break; - case comments::InlineCommandComment::RenderEmphasized: + case comments::InlineCommandRenderKind::Emphasized: JOS.attribute("renderKind", "emphasized"); break; - case comments::InlineCommandComment::RenderMonospaced: + case comments::InlineCommandRenderKind::Monospaced: JOS.attribute("renderKind", "monospaced"); break; - case comments::InlineCommandComment::RenderAnchor: + case comments::InlineCommandRenderKind::Anchor: JOS.attribute("renderKind", "anchor"); break; } @@ -1632,13 +1789,13 @@ void JSONNodeDumper::visitBlockCommandComment( void JSONNodeDumper::visitParamCommandComment( const comments::ParamCommandComment *C, const comments::FullComment *FC) { switch (C->getDirection()) { - case comments::ParamCommandComment::In: + case comments::ParamCommandPassDirection::In: JOS.attribute("direction", "in"); break; - case comments::ParamCommandComment::Out: + case comments::ParamCommandPassDirection::Out: JOS.attribute("direction", "out"); break; - case comments::ParamCommandComment::InOut: + 
case comments::ParamCommandPassDirection::InOut: JOS.attribute("direction", "in,out"); break; } @@ -1683,3 +1840,18 @@ void JSONNodeDumper::visitVerbatimLineComment( const comments::VerbatimLineComment *C, const comments::FullComment *) { JOS.attribute("text", C->getText()); } + +llvm::json::Object JSONNodeDumper::createFPOptions(FPOptionsOverride FPO) { + llvm::json::Object Ret; +#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \ + if (FPO.has##NAME##Override()) \ + Ret.try_emplace(#NAME, static_cast<unsigned>(FPO.get##NAME##Override())); +#include "clang/Basic/FPOptions.def" + return Ret; +} + +void JSONNodeDumper::VisitCompoundStmt(const CompoundStmt *S) { + VisitStmt(S); + if (S->hasStoredFPFeatures()) + JOS.attribute("fpoptions", createFPOptions(S->getStoredFPFeatures())); +} diff --git a/contrib/llvm-project/clang/lib/AST/Linkage.h b/contrib/llvm-project/clang/lib/AST/Linkage.h index cd50d138790a..e4dcb5e53261 100644 --- a/contrib/llvm-project/clang/lib/AST/Linkage.h +++ b/contrib/llvm-project/clang/lib/AST/Linkage.h @@ -19,8 +19,8 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/Type.h" #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/Optional.h" #include "llvm/ADT/PointerIntPair.h" +#include <optional> namespace clang { /// Kinds of LV computation. The linkage side of the computation is @@ -29,12 +29,15 @@ namespace clang { struct LVComputationKind { /// The kind of entity whose visibility is ultimately being computed; /// visibility computations for types and non-types follow different rules. + LLVM_PREFERRED_TYPE(bool) unsigned ExplicitKind : 1; /// Whether explicit visibility attributes should be ignored. When set, /// visibility may only be restricted by the visibility of template arguments. + LLVM_PREFERRED_TYPE(bool) unsigned IgnoreExplicitVisibility : 1; /// Whether all visibility should be ignored. When set, we're only interested /// in computing linkage. + LLVM_PREFERRED_TYPE(bool) unsigned IgnoreAllVisibility : 1; enum { NumLVComputationKindBits = 3 }; @@ -91,11 +94,11 @@ class LinkageComputer { return QueryType(ND, Kind.toBits()); } - llvm::Optional<LinkageInfo> lookup(const NamedDecl *ND, - LVComputationKind Kind) const { + std::optional<LinkageInfo> lookup(const NamedDecl *ND, + LVComputationKind Kind) const { auto Iter = CachedLinkageInfo.find(makeCacheKey(ND, Kind)); if (Iter == CachedLinkageInfo.end()) - return None; + return std::nullopt; return Iter->second; } diff --git a/contrib/llvm-project/clang/lib/AST/Mangle.cpp b/contrib/llvm-project/clang/lib/AST/Mangle.cpp index 54dbf484f377..75f6e2161a63 100644 --- a/contrib/llvm-project/clang/lib/AST/Mangle.cpp +++ b/contrib/llvm-project/clang/lib/AST/Mangle.cpp @@ -70,11 +70,9 @@ static CCMangling getCallingConvMangling(const ASTContext &Context, // On wasm, the argc/argv form of "main" is renamed so that the startup code // can call it with the correct function signature. - // On Emscripten, users may be exporting "main" and expecting to call it - // themselves, so we can't mangle it. - if (Triple.isWasm() && !Triple.isOSEmscripten()) + if (Triple.isWasm()) if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) - if (FD->isMain() && FD->hasPrototype() && FD->param_size() == 2) + if (FD->isMain() && FD->getNumParams() == 2) return CCM_WasmMainArgcArgv; if (!Triple.isOSWindows() || !Triple.isX86()) @@ -149,7 +147,7 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) { // If the label isn't literal, or if this is an alias for an LLVM intrinsic, // do not add a "\01" prefix. 
- if (!ALA->getIsLiteralLabel() || ALA->getLabel().startswith("llvm.")) { + if (!ALA->getIsLiteralLabel() || ALA->getLabel().starts_with("llvm.")) { Out << ALA->getLabel(); return; } @@ -200,8 +198,12 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) { Out << '_'; else if (CC == CCM_Fast) Out << '@'; - else if (CC == CCM_RegCall) - Out << "__regcall3__"; + else if (CC == CCM_RegCall) { + if (getASTContext().getLangOpts().RegCall4) + Out << "__regcall4__"; + else + Out << "__regcall3__"; + } if (!MCXX) Out << D->getIdentifier()->getName(); @@ -223,14 +225,20 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) { assert(!Proto->isVariadic()); unsigned ArgWords = 0; if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) - if (!MD->isStatic()) + if (MD->isImplicitObjectMemberFunction()) ++ArgWords; - for (const auto &AT : Proto->param_types()) + uint64_t DefaultPtrWidth = TI.getPointerWidth(LangAS::Default); + for (const auto &AT : Proto->param_types()) { + // If an argument type is incomplete there is no way to get its size to + // correctly encode into the mangling scheme. + // Follow GCCs behaviour by simply breaking out of the loop. + if (AT->isIncompleteType()) + break; // Size should be aligned to pointer size. - ArgWords += - llvm::alignTo(ASTContext.getTypeSize(AT), TI.getPointerWidth(0)) / - TI.getPointerWidth(0); - Out << ((TI.getPointerWidth(0) / 8) * ArgWords); + ArgWords += llvm::alignTo(ASTContext.getTypeSize(AT), DefaultPtrWidth) / + DefaultPtrWidth; + } + Out << ((DefaultPtrWidth / 8) * ArgWords); } void MangleContext::mangleMSGuidDecl(const MSGuidDecl *GD, raw_ostream &Out) { @@ -293,9 +301,8 @@ void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD, } else { assert((isa<NamedDecl>(DC) || isa<BlockDecl>(DC)) && "expected a NamedDecl or BlockDecl"); - if (isa<BlockDecl>(DC)) - for (; DC && isa<BlockDecl>(DC); DC = DC->getParent()) - (void) getBlockId(cast<BlockDecl>(DC), true); + for (; isa_and_nonnull<BlockDecl>(DC); DC = DC->getParent()) + (void)getBlockId(cast<BlockDecl>(DC), true); assert((isa<TranslationUnitDecl>(DC) || isa<NamedDecl>(DC)) && "expected a TranslationUnitDecl or a NamedDecl"); if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC)) @@ -456,7 +463,7 @@ public: SmallString<40> Mangled; auto Prefix = getClassSymbolPrefix(Kind, OCD->getASTContext()); llvm::Mangler::getNameWithPrefix(Mangled, Prefix + ClassName, DL); - return std::string(Mangled.str()); + return std::string(Mangled); }; return { @@ -506,10 +513,20 @@ public: } } else if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(ND)) { Manglings.emplace_back(getName(ND)); - if (MD->isVirtual()) - if (const auto *TIV = Ctx.getVTableContext()->getThunkInfo(MD)) - for (const auto &T : *TIV) - Manglings.emplace_back(getMangledThunk(MD, T)); + if (MD->isVirtual()) { + if (const auto *TIV = Ctx.getVTableContext()->getThunkInfo(MD)) { + for (const auto &T : *TIV) { + std::string ThunkName; + std::string ContextualizedName = + getMangledThunk(MD, T, /* ElideOverrideInfo */ false); + if (Ctx.useAbbreviatedThunkName(MD, ContextualizedName)) + ThunkName = getMangledThunk(MD, T, /* ElideOverrideInfo */ true); + else + ThunkName = ContextualizedName; + Manglings.emplace_back(ThunkName); + } + } + } } return Manglings; @@ -562,11 +579,12 @@ private: return BOS.str(); } - std::string getMangledThunk(const CXXMethodDecl *MD, const ThunkInfo &T) { + std::string getMangledThunk(const CXXMethodDecl *MD, const ThunkInfo &T, + bool ElideOverrideInfo) { std::string 
FrontendBuf; llvm::raw_string_ostream FOS(FrontendBuf); - MC->mangleThunk(MD, T, FOS); + MC->mangleThunk(MD, T, ElideOverrideInfo, FOS); std::string BackendBuf; llvm::raw_string_ostream BOS(BackendBuf); diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp index 166aa3b3bd60..1c020c3ad4ad 100644 --- a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp +++ b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp @@ -30,14 +30,12 @@ namespace { /// Typically these are things like static locals, lambdas, or blocks. class MicrosoftNumberingContext : public MangleNumberingContext { llvm::DenseMap<const Type *, unsigned> ManglingNumbers; - unsigned LambdaManglingNumber; - unsigned StaticLocalNumber; - unsigned StaticThreadlocalNumber; + unsigned LambdaManglingNumber = 0; + unsigned StaticLocalNumber = 0; + unsigned StaticThreadlocalNumber = 0; public: - MicrosoftNumberingContext() - : MangleNumberingContext(), LambdaManglingNumber(0), - StaticLocalNumber(0), StaticThreadlocalNumber(0) {} + MicrosoftNumberingContext() = default; unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override { return ++LambdaManglingNumber; @@ -69,6 +67,7 @@ class MSHIPNumberingContext : public MicrosoftNumberingContext { std::unique_ptr<MangleNumberingContext> DeviceCtx; public: + using MicrosoftNumberingContext::getManglingNumber; MSHIPNumberingContext(MangleContext *DeviceMangler) { DeviceCtx = createItaniumNumberingContext(DeviceMangler); } @@ -76,6 +75,33 @@ public: unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override { return DeviceCtx->getManglingNumber(CallOperator); } + + unsigned getManglingNumber(const TagDecl *TD, + unsigned MSLocalManglingNumber) override { + unsigned DeviceN = DeviceCtx->getManglingNumber(TD, MSLocalManglingNumber); + unsigned HostN = + MicrosoftNumberingContext::getManglingNumber(TD, MSLocalManglingNumber); + if (DeviceN > 0xFFFF || HostN > 0xFFFF) { + DiagnosticsEngine &Diags = TD->getASTContext().getDiagnostics(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, "Mangling number exceeds limit (65535)"); + Diags.Report(TD->getLocation(), DiagID); + } + return (DeviceN << 16) | HostN; + } +}; + +class MSSYCLNumberingContext : public MicrosoftNumberingContext { + std::unique_ptr<MangleNumberingContext> DeviceCtx; + +public: + MSSYCLNumberingContext(MangleContext *DeviceMangler) { + DeviceCtx = createItaniumNumberingContext(DeviceMangler); + } + + unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override { + return DeviceCtx->getManglingNumber(CallOperator); + } }; class MicrosoftCXXABI : public CXXABI { @@ -100,6 +126,10 @@ public: DeviceMangler.reset( Context.createMangleContext(Context.getAuxTargetInfo())); } + else if (Context.getLangOpts().isSYCL()) { + DeviceMangler.reset( + ItaniumMangleContext::create(Context, Context.getDiagnostics())); + } } MemberPointerInfo @@ -162,7 +192,11 @@ public: if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) { assert(DeviceMangler && "Missing device mangler"); return std::make_unique<MSHIPNumberingContext>(DeviceMangler.get()); + } else if (Context.getLangOpts().isSYCL()) { + assert(DeviceMangler && "Missing device mangler"); + return std::make_unique<MSSYCLNumberingContext>(DeviceMangler.get()); } + return std::make_unique<MicrosoftNumberingContext>(); } }; @@ -267,7 +301,7 @@ CXXABI::MemberPointerInfo MicrosoftCXXABI::getMemberPointerInfo( // The nominal struct is laid out with pointers 
followed by ints and aligned // to a pointer width if any are present and an int width otherwise. const TargetInfo &Target = Context.getTargetInfo(); - unsigned PtrSize = Target.getPointerWidth(0); + unsigned PtrSize = Target.getPointerWidth(LangAS::Default); unsigned IntSize = Target.getIntWidth(); unsigned Ptrs, Ints; @@ -282,7 +316,7 @@ CXXABI::MemberPointerInfo MicrosoftCXXABI::getMemberPointerInfo( if (Ptrs + Ints > 1 && Target.getTriple().isArch32Bit()) MPI.Align = 64; else if (Ptrs) - MPI.Align = Target.getPointerAlign(0); + MPI.Align = Target.getPointerAlign(LangAS::Default); else MPI.Align = Target.getIntAlign(); diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp index d89cddd2adda..4016043df62e 100644 --- a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp +++ b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp @@ -21,6 +21,7 @@ #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" +#include "clang/AST/GlobalDecl.h" #include "clang/AST/Mangle.h" #include "clang/AST/VTableBuilder.h" #include "clang/Basic/ABI.h" @@ -28,17 +29,32 @@ #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Support/CRC.h" #include "llvm/Support/MD5.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/StringSaver.h" #include "llvm/Support/xxhash.h" +#include <functional> +#include <optional> using namespace clang; namespace { +// Get GlobalDecl of DeclContext of local entities. +static GlobalDecl getGlobalDeclAsDeclContext(const DeclContext *DC) { + GlobalDecl GD; + if (auto *CD = dyn_cast<CXXConstructorDecl>(DC)) + GD = GlobalDecl(CD, Ctor_Complete); + else if (auto *DD = dyn_cast<CXXDestructorDecl>(DC)) + GD = GlobalDecl(DD, Dtor_Complete); + else + GD = GlobalDecl(cast<FunctionDecl>(DC)); + return GD; +} + struct msvc_hashing_ostream : public llvm::raw_svector_ostream { raw_ostream &OS; llvm::SmallString<64> Buffer; @@ -47,7 +63,7 @@ struct msvc_hashing_ostream : public llvm::raw_svector_ostream { : llvm::raw_svector_ostream(Buffer), OS(OS) {} ~msvc_hashing_ostream() override { StringRef MangledName = str(); - bool StartsWithEscape = MangledName.startswith("\01"); + bool StartsWithEscape = MangledName.starts_with("\01"); if (StartsWithEscape) MangledName = MangledName.drop_front(1); if (MangledName.size() < 4096) { @@ -129,12 +145,13 @@ class MicrosoftMangleContextImpl : public MicrosoftMangleContext { llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator; llvm::DenseMap<const NamedDecl *, unsigned> Uniquifier; llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds; - llvm::DenseMap<const NamedDecl *, unsigned> SEHFilterIds; - llvm::DenseMap<const NamedDecl *, unsigned> SEHFinallyIds; + llvm::DenseMap<GlobalDecl, unsigned> SEHFilterIds; + llvm::DenseMap<GlobalDecl, unsigned> SEHFinallyIds; SmallString<16> AnonymousNamespaceHash; public: - MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags); + MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags, + bool IsAux = false); bool shouldMangleCXXName(const NamedDecl *D) override; bool shouldMangleStringLiteral(const StringLiteral *SL) override; void mangleCXXName(GlobalDecl GD, raw_ostream &Out) override; @@ -142,9 +159,9 @@ public: const MethodVFTableLocation &ML, raw_ostream &Out) override; void mangleThunk(const CXXMethodDecl *MD, 
const ThunkInfo &Thunk, - raw_ostream &) override; + bool ElideOverrideInfo, raw_ostream &) override; void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type, - const ThisAdjustment &ThisAdjustment, + const ThunkInfo &Thunk, bool ElideOverrideInfo, raw_ostream &) override; void mangleCXXVFTable(const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath, @@ -152,6 +169,8 @@ public: void mangleCXXVBTable(const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath, raw_ostream &Out) override; + + void mangleCXXVTable(const CXXRecordDecl *, raw_ostream &) override; void mangleCXXVirtualDisplacementMap(const CXXRecordDecl *SrcRD, const CXXRecordDecl *DstRD, raw_ostream &Out) override; @@ -165,7 +184,8 @@ public: int32_t VBPtrOffset, uint32_t VBIndex, raw_ostream &Out) override; void mangleCXXRTTI(QualType T, raw_ostream &Out) override; - void mangleCXXRTTIName(QualType T, raw_ostream &Out) override; + void mangleCXXRTTIName(QualType T, raw_ostream &Out, + bool NormalizeIntegers) override; void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived, uint32_t NVOffset, int32_t VBPtrOffset, uint32_t VBTableOffset, uint32_t Flags, @@ -178,7 +198,8 @@ public: mangleCXXRTTICompleteObjectLocator(const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath, raw_ostream &Out) override; - void mangleTypeName(QualType T, raw_ostream &) override; + void mangleCanonicalTypeName(QualType T, raw_ostream &, + bool NormalizeIntegers) override; void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber, raw_ostream &) override; void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override; @@ -187,9 +208,9 @@ public: void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override; void mangleDynamicAtExitDestructor(const VarDecl *D, raw_ostream &Out) override; - void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl, + void mangleSEHFilterExpression(GlobalDecl EnclosingDecl, raw_ostream &Out) override; - void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl, + void mangleSEHFinallyBlock(GlobalDecl EnclosingDecl, raw_ostream &Out) override; void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override; bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) { @@ -208,7 +229,7 @@ public: // Use the canonical number for externally visible decls. if (ND->isExternallyVisible()) { - disc = getASTContext().getManglingNumber(ND); + disc = getASTContext().getManglingNumber(ND, isAux()); return true; } @@ -271,12 +292,8 @@ public: assert(!RD->isExternallyVisible() && "RD must not be visible!"); assert(RD->getLambdaManglingNumber() == 0 && "RD must not have a mangling number!"); - llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator Result = - LambdaIds.find(RD); // The lambda should exist, but return 0 in case it doesn't. 
- if (Result == LambdaIds.end()) - return 0; - return Result->second; + return LambdaIds.lookup(RD); } /// Return a character sequence that is (somewhat) unique to the TU suitable @@ -310,8 +327,8 @@ class MicrosoftCXXNameMangler { typedef llvm::DenseMap<const void *, StringRef> TemplateArgStringMap; TemplateArgStringMap TemplateArgStrings; - llvm::StringSaver TemplateArgStringStorage; llvm::BumpPtrAllocator TemplateArgStringStorageAlloc; + llvm::StringSaver TemplateArgStringStorage; typedef std::set<std::pair<int, bool>> PassObjectSizeArgsSet; PassObjectSizeArgsSet PassObjectSizeArgs; @@ -322,38 +339,52 @@ class MicrosoftCXXNameMangler { public: enum QualifierMangleMode { QMM_Drop, QMM_Mangle, QMM_Escape, QMM_Result }; + enum class TplArgKind { ClassNTTP, StructuralValue }; MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_) : Context(C), Out(Out_), Structor(nullptr), StructorType(-1), TemplateArgStringStorage(TemplateArgStringStorageAlloc), - PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) == - 64) {} + PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth( + LangAS::Default) == 64) {} MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_, const CXXConstructorDecl *D, CXXCtorType Type) : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), TemplateArgStringStorage(TemplateArgStringStorageAlloc), - PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) == - 64) {} + PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth( + LangAS::Default) == 64) {} MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_, const CXXDestructorDecl *D, CXXDtorType Type) : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), TemplateArgStringStorage(TemplateArgStringStorageAlloc), - PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) == - 64) {} + PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth( + LangAS::Default) == 64) {} raw_ostream &getStream() const { return Out; } - void mangle(const NamedDecl *D, StringRef Prefix = "?"); - void mangleName(const NamedDecl *ND); - void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle); + void mangle(GlobalDecl GD, StringRef Prefix = "?"); + void mangleName(GlobalDecl GD); + void mangleFunctionEncoding(GlobalDecl GD, bool ShouldMangle); void mangleVariableEncoding(const VarDecl *VD); void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD, + const NonTypeTemplateParmDecl *PD, + QualType TemplateArgType, StringRef Prefix = "$"); + void mangleMemberDataPointerInClassNTTP(const CXXRecordDecl *, + const ValueDecl *); void mangleMemberFunctionPointer(const CXXRecordDecl *RD, const CXXMethodDecl *MD, + const NonTypeTemplateParmDecl *PD, + QualType TemplateArgType, StringRef Prefix = "$"); + void mangleFunctionPointer(const FunctionDecl *FD, + const NonTypeTemplateParmDecl *PD, + QualType TemplateArgType); + void mangleVarDecl(const VarDecl *VD, const NonTypeTemplateParmDecl *PD, + QualType TemplateArgType); + void mangleMemberFunctionPointerInClassNTTP(const CXXRecordDecl *RD, + const CXXMethodDecl *MD); void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD, const MethodVFTableLocation &ML); void mangleNumber(int64_t Number); @@ -362,7 +393,7 @@ public: void mangleBits(llvm::APInt Number); void mangleTagTypeKind(TagTypeKind TK); void mangleArtificialTagType(TagTypeKind TK, StringRef UnqualifiedName, - ArrayRef<StringRef> NestedNames = None); + 
ArrayRef<StringRef> NestedNames = std::nullopt); void mangleAddressSpaceType(QualType T, Qualifiers Quals, SourceRange Range); void mangleType(QualType T, SourceRange Range, QualifierMangleMode QMM = QMM_Mangle); @@ -370,7 +401,8 @@ public: const FunctionDecl *D = nullptr, bool ForceThisQuals = false, bool MangleExceptionSpec = true); - void mangleNestedName(const NamedDecl *ND); + void mangleSourceName(StringRef Name); + void mangleNestedName(GlobalDecl GD); private: bool isStructorDecl(const NamedDecl *ND) const { @@ -384,11 +416,10 @@ private: AddrSpace == LangAS::ptr32_uptr)); } - void mangleUnqualifiedName(const NamedDecl *ND) { - mangleUnqualifiedName(ND, ND->getDeclName()); + void mangleUnqualifiedName(GlobalDecl GD) { + mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName()); } - void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name); - void mangleSourceName(StringRef Name); + void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name); void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc); void mangleCXXDtorType(CXXDtorType T); void mangleQualifiers(Qualifiers Quals, bool IsMember); @@ -396,9 +427,9 @@ private: void manglePointerCVQualifiers(Qualifiers Quals); void manglePointerExtQualifiers(Qualifiers Quals, QualType PointeeType); - void mangleUnscopedTemplateName(const TemplateDecl *ND); + void mangleUnscopedTemplateName(GlobalDecl GD); void - mangleTemplateInstantiationName(const TemplateDecl *TD, + mangleTemplateInstantiationName(GlobalDecl GD, const TemplateArgumentList &TemplateArgs); void mangleObjCMethodName(const ObjCMethodDecl *MD); @@ -422,8 +453,8 @@ private: void mangleDecayedArrayType(const ArrayType *T); void mangleArrayType(const ArrayType *T); void mangleFunctionClass(const FunctionDecl *FD); - void mangleCallingConvention(CallingConv CC); - void mangleCallingConvention(const FunctionType *T); + void mangleCallingConvention(CallingConv CC, SourceRange Range); + void mangleCallingConvention(const FunctionType *T, SourceRange Range); void mangleIntegerLiteral(const llvm::APSInt &Number, const NonTypeTemplateParmDecl *PD = nullptr, QualType TemplateArgType = QualType()); @@ -434,7 +465,7 @@ private: const TemplateArgumentList &TemplateArgs); void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA, const NamedDecl *Parm); - void mangleTemplateArgValue(QualType T, const APValue &V, + void mangleTemplateArgValue(QualType T, const APValue &V, TplArgKind, bool WithScalarType = false); void mangleObjCProtocol(const ObjCProtocolDecl *PD); @@ -446,8 +477,9 @@ private: } MicrosoftMangleContextImpl::MicrosoftMangleContextImpl(ASTContext &Context, - DiagnosticsEngine &Diags) - : MicrosoftMangleContext(Context, Diags) { + DiagnosticsEngine &Diags, + bool IsAux) + : MicrosoftMangleContext(Context, Diags, IsAux) { // To mangle anonymous namespaces, hash the path to the main source file. The // path should be whatever (probably relative) path was passed on the command // line. The goal is for the compiler to produce the same output regardless of @@ -463,9 +495,9 @@ MicrosoftMangleContextImpl::MicrosoftMangleContextImpl(ASTContext &Context, // The generated names are intended to look similar to what MSVC generates, // which are something like "?A0x01234567@". SourceManager &SM = Context.getSourceManager(); - if (const FileEntry *FE = SM.getFileEntryForID(SM.getMainFileID())) { + if (OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getMainFileID())) { // Truncate the hash so we get 8 characters of hexadecimal. 
- uint32_t TruncatedHash = uint32_t(xxHash64(FE->getName())); + uint32_t TruncatedHash = uint32_t(xxh3_64bits(FE->getName())); AnonymousNamespaceHash = llvm::utohexstr(TruncatedHash); } else { // If we don't have a path to the main file, we'll just use 0. @@ -519,9 +551,8 @@ bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) { while (!DC->isNamespace() && !DC->isTranslationUnit()) DC = getEffectiveParentContext(DC); - if (DC->isTranslationUnit() && D->getFormalLinkage() == InternalLinkage && - !isa<VarTemplateSpecializationDecl>(D) && - D->getIdentifier() != nullptr) + if (DC->isTranslationUnit() && D->getFormalLinkage() == Linkage::Internal && + !isa<VarTemplateSpecializationDecl>(D) && D->getIdentifier() != nullptr) return false; } @@ -533,7 +564,8 @@ MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) { return true; } -void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) { +void MicrosoftCXXNameMangler::mangle(GlobalDecl GD, StringRef Prefix) { + const NamedDecl *D = cast<NamedDecl>(GD.getDecl()); // MSVC doesn't mangle C++ names the same way it mangles extern "C" names. // Therefore it's really important that we don't decorate the // name with leading underscores or leading/trailing at signs. So, by @@ -542,9 +574,9 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) { // <mangled-name> ::= ? <name> <type-encoding> Out << Prefix; - mangleName(D); + mangleName(GD); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) - mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD)); + mangleFunctionEncoding(GD, Context.shouldMangleDeclName(FD)); else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) mangleVariableEncoding(VD); else if (isa<MSGuidDecl>(D)) @@ -558,8 +590,9 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) { llvm_unreachable("Tried to mangle unexpected NamedDecl!"); } -void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD, +void MicrosoftCXXNameMangler::mangleFunctionEncoding(GlobalDecl GD, bool ShouldMangle) { + const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); // <type-encoding> ::= <function-class> <function-type> // Since MSVC operates on the type as written and not the canonical type, it @@ -644,12 +677,17 @@ void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) { } } -void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD, - const ValueDecl *VD, - StringRef Prefix) { +void MicrosoftCXXNameMangler::mangleMemberDataPointer( + const CXXRecordDecl *RD, const ValueDecl *VD, + const NonTypeTemplateParmDecl *PD, QualType TemplateArgType, + StringRef Prefix) { // <member-data-pointer> ::= <integer-literal> // ::= $F <number> <number> // ::= $G <number> <number> <number> + // + // <auto-nttp> ::= $ M <type> <integer-literal> + // <auto-nttp> ::= $ M <type> F <name> <number> + // <auto-nttp> ::= $ M <type> G <name> <number> <number> int64_t FieldOffset; int64_t VBTableOffset; @@ -678,7 +716,18 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD, case MSInheritanceModel::Unspecified: Code = 'G'; break; } - Out << Prefix << Code; + Out << Prefix; + + if (VD && + getASTContext().getLangOpts().isCompatibleWithMSVC( + LangOptions::MSVC2019) && + PD && PD->getType()->getTypeClass() == Type::Auto && + !TemplateArgType.isNull()) { + Out << "M"; + mangleType(TemplateArgType, SourceRange(), QMM_Drop); + } + + Out << Code; mangleNumber(FieldOffset); @@ -691,14 +740,41 
@@ void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD, mangleNumber(VBTableOffset); } -void -MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD, - const CXXMethodDecl *MD, - StringRef Prefix) { +void MicrosoftCXXNameMangler::mangleMemberDataPointerInClassNTTP( + const CXXRecordDecl *RD, const ValueDecl *VD) { + MSInheritanceModel IM = RD->getMSInheritanceModel(); + // <nttp-class-member-data-pointer> ::= <member-data-pointer> + // ::= N + // ::= 8 <postfix> @ <unqualified-name> @ + + if (IM != MSInheritanceModel::Single && IM != MSInheritanceModel::Multiple) + return mangleMemberDataPointer(RD, VD, nullptr, QualType(), ""); + + if (!VD) { + Out << 'N'; + return; + } + + Out << '8'; + mangleNestedName(VD); + Out << '@'; + mangleUnqualifiedName(VD); + Out << '@'; +} + +void MicrosoftCXXNameMangler::mangleMemberFunctionPointer( + const CXXRecordDecl *RD, const CXXMethodDecl *MD, + const NonTypeTemplateParmDecl *PD, QualType TemplateArgType, + StringRef Prefix) { // <member-function-pointer> ::= $1? <name> // ::= $H? <name> <number> // ::= $I? <name> <number> <number> // ::= $J? <name> <number> <number> <number> + // + // <auto-nttp> ::= $ M <type> 1? <name> + // <auto-nttp> ::= $ M <type> H? <name> <number> + // <auto-nttp> ::= $ M <type> I? <name> <number> <number> + // <auto-nttp> ::= $ M <type> J? <name> <number> <number> <number> MSInheritanceModel IM = RD->getMSInheritanceModel(); @@ -716,7 +792,17 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD, uint64_t VBTableOffset = 0; uint64_t VBPtrOffset = 0; if (MD) { - Out << Prefix << Code << '?'; + Out << Prefix; + + if (getASTContext().getLangOpts().isCompatibleWithMSVC( + LangOptions::MSVC2019) && + PD && PD->getType()->getTypeClass() == Type::Auto && + !TemplateArgType.isNull()) { + Out << "M"; + mangleType(TemplateArgType, SourceRange(), QMM_Drop); + } + + Out << Code << '?'; if (MD->isVirtual()) { MicrosoftVTableContext *VTContext = cast<MicrosoftVTableContext>(getASTContext().getVTableContext()); @@ -755,11 +841,83 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD, mangleNumber(VBTableOffset); } +void MicrosoftCXXNameMangler::mangleFunctionPointer( + const FunctionDecl *FD, const NonTypeTemplateParmDecl *PD, + QualType TemplateArgType) { + // <func-ptr> ::= $1? <mangled-name> + // <func-ptr> ::= <auto-nttp> + // + // <auto-nttp> ::= $ M <type> 1? <mangled-name> + Out << '$'; + + if (getASTContext().getLangOpts().isCompatibleWithMSVC( + LangOptions::MSVC2019) && + PD && PD->getType()->getTypeClass() == Type::Auto && + !TemplateArgType.isNull()) { + Out << "M"; + mangleType(TemplateArgType, SourceRange(), QMM_Drop); + } + + Out << "1?"; + mangleName(FD); + mangleFunctionEncoding(FD, /*ShouldMangle=*/true); +} + +void MicrosoftCXXNameMangler::mangleVarDecl(const VarDecl *VD, + const NonTypeTemplateParmDecl *PD, + QualType TemplateArgType) { + // <var-ptr> ::= $1? <mangled-name> + // <var-ptr> ::= <auto-nttp> + // + // <auto-nttp> ::= $ M <type> 1? 
<mangled-name> + Out << '$'; + + if (getASTContext().getLangOpts().isCompatibleWithMSVC( + LangOptions::MSVC2019) && + PD && PD->getType()->getTypeClass() == Type::Auto && + !TemplateArgType.isNull()) { + Out << "M"; + mangleType(TemplateArgType, SourceRange(), QMM_Drop); + } + + Out << "1?"; + mangleName(VD); + mangleVariableEncoding(VD); +} + +void MicrosoftCXXNameMangler::mangleMemberFunctionPointerInClassNTTP( + const CXXRecordDecl *RD, const CXXMethodDecl *MD) { + // <nttp-class-member-function-pointer> ::= <member-function-pointer> + // ::= N + // ::= E? <virtual-mem-ptr-thunk> + // ::= E? <mangled-name> <type-encoding> + + if (!MD) { + if (RD->getMSInheritanceModel() != MSInheritanceModel::Single) + return mangleMemberFunctionPointer(RD, MD, nullptr, QualType(), ""); + + Out << 'N'; + return; + } + + Out << "E?"; + if (MD->isVirtual()) { + MicrosoftVTableContext *VTContext = + cast<MicrosoftVTableContext>(getASTContext().getVTableContext()); + MethodVFTableLocation ML = + VTContext->getMethodVFTableLocation(GlobalDecl(MD)); + mangleVirtualMemPtrThunk(MD, ML); + } else { + mangleName(MD); + mangleFunctionEncoding(MD, /*ShouldMangle=*/true); + } +} + void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk( const CXXMethodDecl *MD, const MethodVFTableLocation &ML) { // Get the vftable offset. CharUnits PointerWidth = getASTContext().toCharUnitsFromBits( - getASTContext().getTargetInfo().getPointerWidth(0)); + getASTContext().getTargetInfo().getPointerWidth(LangAS::Default)); uint64_t OffsetInVFTable = ML.Index * PointerWidth.getQuantity(); Out << "?_9"; @@ -767,16 +925,17 @@ void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk( Out << "$B"; mangleNumber(OffsetInVFTable); Out << 'A'; - mangleCallingConvention(MD->getType()->castAs<FunctionProtoType>()); + mangleCallingConvention(MD->getType()->castAs<FunctionProtoType>(), + MD->getSourceRange()); } -void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) { +void MicrosoftCXXNameMangler::mangleName(GlobalDecl GD) { // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @ // Always start with the unqualified name. - mangleUnqualifiedName(ND); + mangleUnqualifiedName(GD); - mangleNestedName(ND); + mangleNestedName(GD); // Terminate the whole name with an '@'. Out << '@'; @@ -791,8 +950,8 @@ void MicrosoftCXXNameMangler::mangleNumber(llvm::APSInt Number) { // to convert every integer to signed 64 bit before mangling (including // unsigned 64 bit values). Do the same, but preserve bits beyond the bottom // 64. - llvm::APInt Value = - Number.isSigned() ? 
Number.sextOrSelf(64) : Number.zextOrSelf(64); + unsigned Width = std::max(Number.getBitWidth(), 64U); + llvm::APInt Value = Number.extend(Width); // <non-negative integer> ::= A@ # when Number == 0 // ::= <decimal digit> # when 1 <= Number <= 10 @@ -821,6 +980,17 @@ void MicrosoftCXXNameMangler::mangleFloat(llvm::APFloat Number) { case APFloat::S_x87DoubleExtended: Out << 'X'; break; case APFloat::S_IEEEquad: Out << 'Y'; break; case APFloat::S_PPCDoubleDouble: Out << 'Z'; break; + case APFloat::S_Float8E5M2: + case APFloat::S_Float8E4M3: + case APFloat::S_Float8E4M3FN: + case APFloat::S_Float8E5M2FNUZ: + case APFloat::S_Float8E4M3FNUZ: + case APFloat::S_Float8E4M3B11FNUZ: + case APFloat::S_FloatTF32: + case APFloat::S_Float6E3M2FN: + case APFloat::S_Float6E2M3FN: + case APFloat::S_Float4E2M1FN: + llvm_unreachable("Tried to mangle unexpected APFloat semantics"); } mangleBits(Number.bitcastToAPInt()); @@ -844,13 +1014,14 @@ void MicrosoftCXXNameMangler::mangleBits(llvm::APInt Value) { } } -static const TemplateDecl * -isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) { +static GlobalDecl isTemplate(GlobalDecl GD, + const TemplateArgumentList *&TemplateArgs) { + const NamedDecl *ND = cast<NamedDecl>(GD.getDecl()); // Check if we have a function template. if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { if (const TemplateDecl *TD = FD->getPrimaryTemplate()) { TemplateArgs = FD->getTemplateSpecializationArgs(); - return TD; + return GD.getWithDecl(TD); } } @@ -858,21 +1029,22 @@ isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) { if (const ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(ND)) { TemplateArgs = &Spec->getTemplateArgs(); - return Spec->getSpecializedTemplate(); + return GD.getWithDecl(Spec->getSpecializedTemplate()); } // Check if we have a variable template. if (const VarTemplateSpecializationDecl *Spec = dyn_cast<VarTemplateSpecializationDecl>(ND)) { TemplateArgs = &Spec->getTemplateArgs(); - return Spec->getSpecializedTemplate(); + return GD.getWithDecl(Spec->getSpecializedTemplate()); } - return nullptr; + return GlobalDecl(); } -void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, +void MicrosoftCXXNameMangler::mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name) { + const NamedDecl *ND = cast<NamedDecl>(GD.getDecl()); // <unqualified-name> ::= <operator-name> // ::= <ctor-dtor-name> // ::= <source-name> @@ -880,11 +1052,11 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, // Check if we have a template. const TemplateArgumentList *TemplateArgs = nullptr; - if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) { + if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) { // Function templates aren't considered for name back referencing. This // makes sense since function templates aren't likely to occur multiple // times in a symbol. 
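  // Illustrative aside (hedged, not taken from this patch): "back referencing"
  // here is the MSVC scheme in which the first ten <source-name>s emitted in a
  // mangled symbol are remembered, and a later repeat of one of those names is
  // mangled as a single digit index into that list instead of being spelled
  // out again. For example, a symbol that mentions a class name "Foo" twice
  // would typically mangle the second mention as the back-reference digit '0'
  // rather than repeating "Foo@" ("Foo" is a hypothetical name used only for
  // illustration).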
- if (isa<FunctionTemplateDecl>(TD)) { + if (isa<FunctionTemplateDecl>(TD.getDecl())) { mangleTemplateInstantiationName(TD, *TemplateArgs); Out << '@'; return; @@ -945,7 +1117,19 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, switch (Name.getNameKind()) { case DeclarationName::Identifier: { if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) { - mangleSourceName(II->getName()); + bool IsDeviceStub = + ND && + ((isa<FunctionDecl>(ND) && ND->hasAttr<CUDAGlobalAttr>()) || + (isa<FunctionTemplateDecl>(ND) && + cast<FunctionTemplateDecl>(ND) + ->getTemplatedDecl() + ->hasAttr<CUDAGlobalAttr>())) && + GD.getKernelReferenceKind() == KernelReferenceKind::Stub; + if (IsDeviceStub) + mangleSourceName( + (llvm::Twine("__device_stub__") + II->getName()).str()); + else + mangleSourceName(II->getName()); break; } @@ -996,7 +1180,7 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) { Out << "?__N"; mangleTemplateArgValue(TPO->getType().getUnqualifiedType(), - TPO->getValue()); + TPO->getValue(), TplArgKind::ClassNTTP); break; } @@ -1146,7 +1330,13 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, // <postfix> ::= <unqualified-name> [<postfix>] // ::= <substitution> [<postfix>] -void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) { +void MicrosoftCXXNameMangler::mangleNestedName(GlobalDecl GD) { + const NamedDecl *ND = cast<NamedDecl>(GD.getDecl()); + + if (const auto *ID = dyn_cast<IndirectFieldDecl>(ND)) + for (unsigned I = 1, IE = ID->getChainingSize(); I < IE; ++I) + mangleSourceName("<unnamed-tag>"); + const DeclContext *DC = getEffectiveDeclContext(ND); while (!DC->isTranslationUnit()) { if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) { @@ -1214,9 +1404,9 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) { if (PointersAre64Bit) Out << 'E'; Out << 'A'; - mangleArtificialTagType(TTK_Struct, - Discriminate("__block_literal", Discriminator, - ParameterDiscriminator)); + mangleArtificialTagType(TagTypeKind::Struct, + Discriminate("__block_literal", Discriminator, + ParameterDiscriminator)); Out << "@Z"; // If the effective context was a Record, we have fully mangled the @@ -1229,7 +1419,7 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) { } else if (isa<NamedDecl>(DC)) { ND = cast<NamedDecl>(DC); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { - mangle(FD, "?"); + mangle(getGlobalDeclAsDeclContext(FD), "?"); break; } else { mangleUnqualifiedName(ND); @@ -1418,7 +1608,7 @@ void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) { } void MicrosoftCXXNameMangler::mangleTemplateInstantiationName( - const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) { + GlobalDecl GD, const TemplateArgumentList &TemplateArgs) { // <template-name> ::= <unscoped-template-name> <template-args> // ::= <substitution> // Always start with the unqualified name. @@ -1433,8 +1623,8 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName( TemplateArgBackReferences.swap(OuterTemplateArgsContext); PassObjectSizeArgs.swap(OuterPassObjectSizeArgs); - mangleUnscopedTemplateName(TD); - mangleTemplateArgs(TD, TemplateArgs); + mangleUnscopedTemplateName(GD); + mangleTemplateArgs(cast<TemplateDecl>(GD.getDecl()), TemplateArgs); // Restore the previous back reference contexts. 
NameBackReferences.swap(OuterTemplateContext); @@ -1443,17 +1633,19 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName( PassObjectSizeArgs.swap(OuterPassObjectSizeArgs); } -void -MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) { +void MicrosoftCXXNameMangler::mangleUnscopedTemplateName(GlobalDecl GD) { // <unscoped-template-name> ::= ?$ <unqualified-name> Out << "?$"; - mangleUnqualifiedName(TD); + mangleUnqualifiedName(GD); } void MicrosoftCXXNameMangler::mangleIntegerLiteral( const llvm::APSInt &Value, const NonTypeTemplateParmDecl *PD, QualType TemplateArgType) { // <integer-literal> ::= $0 <number> + // <integer-literal> ::= <auto-nttp> + // + // <auto-nttp> ::= $ M <type> 0 <number> Out << "$"; // Since MSVC 2019, add 'M[<type>]' after '$' for auto template parameter when @@ -1474,7 +1666,7 @@ void MicrosoftCXXNameMangler::mangleIntegerLiteral( void MicrosoftCXXNameMangler::mangleExpression( const Expr *E, const NonTypeTemplateParmDecl *PD) { // See if this is a constant expression. - if (Optional<llvm::APSInt> Value = + if (std::optional<llvm::APSInt> Value = E->getIntegerConstantExpr(Context.getASTContext())) { mangleIntegerLiteral(*Value, PD, E->getType()); return; @@ -1507,6 +1699,22 @@ void MicrosoftCXXNameMangler::mangleTemplateArgs( } } +/// If value V (with type T) represents a decayed pointer to the first element +/// of an array, return that array. +static ValueDecl *getAsArrayToPointerDecayedDecl(QualType T, const APValue &V) { + // Must be a pointer... + if (!T->isPointerType() || !V.isLValue() || !V.hasLValuePath() || + !V.getLValueBase()) + return nullptr; + // ... to element 0 of an array. + QualType BaseT = V.getLValueBase().getType(); + if (!BaseT->isArrayType() || V.getLValuePath().size() != 1 || + V.getLValuePath()[0].getAsArrayIndex() != 0) + return nullptr; + return const_cast<ValueDecl *>( + V.getLValueBase().dyn_cast<const ValueDecl *>()); +} + void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA, const NamedDecl *Parm) { @@ -1515,8 +1723,11 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, // ::= <member-data-pointer> // ::= <member-function-pointer> // ::= $ <constant-value> + // ::= $ <auto-nttp-constant-value> // ::= <template-args> // + // <auto-nttp-constant-value> ::= M <type> <constant-value> + // // <constant-value> ::= 0 <number> # integer // ::= 1 <mangled-name> # address of D // ::= 2 <type> <typed-constant-value>* @ # struct @@ -1530,7 +1741,6 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, // ::= 8 <class> <unqualified-name> @ // ::= A <type> <non-negative integer> # float // ::= B <type> <non-negative integer> # double - // ::= E <mangled-name> # reference to D // # pointer to member, by component value // ::= F <number> <number> // ::= G <number> <number> <number> @@ -1558,24 +1768,29 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, if (isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND)) { mangleMemberDataPointer(cast<CXXRecordDecl>(ND->getDeclContext()) ->getMostRecentNonInjectedDecl(), - cast<ValueDecl>(ND)); + cast<ValueDecl>(ND), + cast<NonTypeTemplateParmDecl>(Parm), + TA.getParamTypeForDecl()); } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); if (MD && MD->isInstance()) { mangleMemberFunctionPointer( - MD->getParent()->getMostRecentNonInjectedDecl(), MD); + MD->getParent()->getMostRecentNonInjectedDecl(), MD, 
+ cast<NonTypeTemplateParmDecl>(Parm), TA.getParamTypeForDecl()); } else { - Out << "$1?"; - mangleName(FD); - mangleFunctionEncoding(FD, /*ShouldMangle=*/true); + mangleFunctionPointer(FD, cast<NonTypeTemplateParmDecl>(Parm), + TA.getParamTypeForDecl()); } } else if (TA.getParamTypeForDecl()->isRecordType()) { Out << "$"; auto *TPO = cast<TemplateParamObjectDecl>(ND); mangleTemplateArgValue(TPO->getType().getUnqualifiedType(), - TPO->getValue()); + TPO->getValue(), TplArgKind::ClassNTTP); + } else if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { + mangleVarDecl(VD, cast<NonTypeTemplateParmDecl>(Parm), + TA.getParamTypeForDecl()); } else { - mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?"); + mangle(ND, "$1?"); } break; } @@ -1591,12 +1806,12 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); if (MPT->isMemberFunctionPointerType() && !isa<FunctionTemplateDecl>(TD)) { - mangleMemberFunctionPointer(RD, nullptr); + mangleMemberFunctionPointer(RD, nullptr, nullptr, QualType()); return; } if (MPT->isMemberDataPointer()) { if (!isa<FunctionTemplateDecl>(TD)) { - mangleMemberDataPointer(RD, nullptr); + mangleMemberDataPointer(RD, nullptr, nullptr, QualType()); return; } // nullptr data pointers are always represented with a single field @@ -1616,6 +1831,27 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, cast<NonTypeTemplateParmDecl>(Parm), T); break; } + case TemplateArgument::StructuralValue: + if (ValueDecl *D = getAsArrayToPointerDecayedDecl( + TA.getStructuralValueType(), TA.getAsStructuralValue())) { + // Mangle the result of array-to-pointer decay as if it were a reference + // to the original declaration, to match MSVC's behavior. This can result + // in mangling collisions in some cases! + return mangleTemplateArg( + TD, TemplateArgument(D, TA.getStructuralValueType()), Parm); + } + Out << "$"; + if (cast<NonTypeTemplateParmDecl>(Parm) + ->getType() + ->getContainedDeducedType()) { + Out << "M"; + mangleType(TA.getNonTypeTemplateArgumentType(), SourceRange(), QMM_Drop); + } + mangleTemplateArgValue(TA.getStructuralValueType(), + TA.getAsStructuralValue(), + TplArgKind::StructuralValue, + /*WithScalarType=*/false); + break; case TemplateArgument::Expression: mangleExpression(TA.getAsExpr(), cast<NonTypeTemplateParmDecl>(Parm)); break; @@ -1658,6 +1894,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, const APValue &V, + TplArgKind TAK, bool WithScalarType) { switch (V.getKind()) { case APValue::None: @@ -1704,46 +1941,62 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, // FIXME: This can only happen as an extension. Invent a mangling. break; } else if (auto *VD = Base.dyn_cast<const ValueDecl*>()) { - Out << (T->isReferenceType() ? "E" : "1"); + Out << "E"; mangle(VD); } else { break; } } else { - unsigned NumAts = 0; - if (T->isPointerType()) { + if (TAK == TplArgKind::ClassNTTP && T->isPointerType()) Out << "5"; - ++NumAts; - } - QualType T = Base.getType(); + SmallVector<char, 2> EntryTypes; + SmallVector<std::function<void()>, 2> EntryManglers; + QualType ET = Base.getType(); for (APValue::LValuePathEntry E : V.getLValuePath()) { - // We don't know how to mangle array subscripting yet. 
- if (T->isArrayType()) - goto mangling_unknown; + if (auto *AT = ET->getAsArrayTypeUnsafe()) { + EntryTypes.push_back('C'); + EntryManglers.push_back([this, I = E.getAsArrayIndex()] { + Out << '0'; + mangleNumber(I); + Out << '@'; + }); + ET = AT->getElementType(); + continue; + } const Decl *D = E.getAsBaseOrMember().getPointer(); - auto *FD = dyn_cast<FieldDecl>(D); - // We don't know how to mangle derived-to-base conversions yet. - if (!FD) - goto mangling_unknown; - - Out << "6"; - ++NumAts; - T = FD->getType(); + if (auto *FD = dyn_cast<FieldDecl>(D)) { + ET = FD->getType(); + if (const auto *RD = ET->getAsRecordDecl()) + if (RD->isAnonymousStructOrUnion()) + continue; + } else { + ET = getASTContext().getRecordType(cast<CXXRecordDecl>(D)); + // Bug in MSVC: fully qualified name of base class should be used for + // mangling to prevent collisions e.g. on base classes with same names + // in different namespaces. + } + + EntryTypes.push_back('6'); + EntryManglers.push_back([this, D] { + mangleUnqualifiedName(cast<NamedDecl>(D)); + Out << '@'; + }); } + for (auto I = EntryTypes.rbegin(), E = EntryTypes.rend(); I != E; ++I) + Out << *I; + auto *VD = Base.dyn_cast<const ValueDecl*>(); if (!VD) break; - Out << "E"; + Out << (TAK == TplArgKind::ClassNTTP ? 'E' : '1'); mangle(VD); - for (APValue::LValuePathEntry E : V.getLValuePath()) { - const Decl *D = E.getAsBaseOrMember().getPointer(); - mangleUnqualifiedName(cast<FieldDecl>(D)); - } - for (unsigned I = 0; I != NumAts; ++I) + for (const std::function<void()> &Mangler : EntryManglers) + Mangler(); + if (TAK == TplArgKind::ClassNTTP && T->isPointerType()) Out << '@'; } @@ -1754,20 +2007,22 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, if (WithScalarType) mangleType(T, SourceRange(), QMM_Escape); - // FIXME: The below manglings don't include a conversion, so bail if there - // would be one. MSVC mangles the (possibly converted) value of the - // pointer-to-member object as if it were a struct, leading to collisions - // in some cases. 
- if (!V.getMemberPointerPath().empty()) - break; - const CXXRecordDecl *RD = T->castAs<MemberPointerType>()->getMostRecentCXXRecordDecl(); const ValueDecl *D = V.getMemberPointerDecl(); - if (T->isMemberDataPointerType()) - mangleMemberDataPointer(RD, D, ""); - else - mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), ""); + if (TAK == TplArgKind::ClassNTTP) { + if (T->isMemberDataPointerType()) + mangleMemberDataPointerInClassNTTP(RD, D); + else + mangleMemberFunctionPointerInClassNTTP(RD, + cast_or_null<CXXMethodDecl>(D)); + } else { + if (T->isMemberDataPointerType()) + mangleMemberDataPointer(RD, D, nullptr, QualType(), ""); + else + mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), nullptr, + QualType(), ""); + } return; } @@ -1779,11 +2034,11 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, unsigned BaseIndex = 0; for (const CXXBaseSpecifier &B : RD->bases()) - mangleTemplateArgValue(B.getType(), V.getStructBase(BaseIndex++)); + mangleTemplateArgValue(B.getType(), V.getStructBase(BaseIndex++), TAK); for (const FieldDecl *FD : RD->fields()) - if (!FD->isUnnamedBitfield()) + if (!FD->isUnnamedBitField()) mangleTemplateArgValue(FD->getType(), - V.getStructField(FD->getFieldIndex()), + V.getStructField(FD->getFieldIndex()), TAK, /*WithScalarType*/ true); Out << '@'; return; @@ -1794,7 +2049,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, mangleType(T, SourceRange(), QMM_Escape); if (const FieldDecl *FD = V.getUnionField()) { mangleUnqualifiedName(FD); - mangleTemplateArgValue(FD->getType(), V.getUnionValue()); + mangleTemplateArgValue(FD->getType(), V.getUnionValue(), TAK); } Out << '@'; return; @@ -1826,7 +2081,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, const APValue &ElemV = I < V.getArrayInitializedElts() ? 
V.getArrayInitializedElt(I) : V.getArrayFiller(); - mangleTemplateArgValue(ElemT, ElemV); + mangleTemplateArgValue(ElemT, ElemV, TAK); Out << '@'; } Out << '@'; @@ -1843,7 +2098,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, mangleType(ElemT, SourceRange(), QMM_Escape); for (unsigned I = 0, N = V.getVectorLength(); I != N; ++I) { const APValue &ElemV = V.getVectorElt(I); - mangleTemplateArgValue(ElemT, ElemV); + mangleTemplateArgValue(ElemT, ElemV, TAK); Out << '@'; } Out << "@@"; @@ -1855,7 +2110,6 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T, break; } -mangling_unknown: DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID( DiagnosticsEngine::Error, "cannot mangle this template argument yet"); @@ -1869,9 +2123,9 @@ void MicrosoftCXXNameMangler::mangleObjCProtocol(const ObjCProtocolDecl *PD) { Stream << "?$"; Extra.mangleSourceName("Protocol"); - Extra.mangleArtificialTagType(TTK_Struct, PD->getName()); + Extra.mangleArtificialTagType(TagTypeKind::Struct, PD->getName()); - mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"}); + mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__ObjC"}); } void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type, @@ -1900,7 +2154,7 @@ void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type, Extra.manglePointerExtQualifiers(Quals, Type); Extra.mangleType(Type, Range); - mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"}); + mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__ObjC"}); } void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T, @@ -1914,10 +2168,10 @@ void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T, Extra.mangleSourceName("KindOf"); Extra.mangleType(QualType(T, 0) .stripObjCKindOfType(getASTContext()) - ->getAs<ObjCObjectType>(), + ->castAs<ObjCObjectType>(), Quals, Range); - mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"}); + mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__ObjC"}); } void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals, @@ -2118,7 +2372,8 @@ void MicrosoftCXXNameMangler::manglePassObjectSizeArg( if (Found == FunArgBackReferences.end()) { std::string Name = Dynamic ? 
"__pass_dynamic_object_size" : "__pass_object_size"; - mangleArtificialTagType(TTK_Enum, Name + llvm::utostr(Type), {"__clang"}); + mangleArtificialTagType(TagTypeKind::Enum, Name + llvm::utostr(Type), + {"__clang"}); if (FunArgBackReferences.size() < 10) { size_t Size = FunArgBackReferences.size(); @@ -2199,7 +2454,7 @@ void MicrosoftCXXNameMangler::mangleAddressSpaceType(QualType T, Extra.mangleType(T, Range, QMM_Escape); mangleQualifiers(Qualifiers(), false); - mangleArtificialTagType(TTK_Struct, ASMangling, {"__clang"}); + mangleArtificialTagType(TagTypeKind::Struct, ASMangling, {"__clang"}); } void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range, @@ -2381,13 +2636,13 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers, llvm_unreachable("placeholder types shouldn't get to name mangling"); case BuiltinType::ObjCId: - mangleArtificialTagType(TTK_Struct, "objc_object"); + mangleArtificialTagType(TagTypeKind::Struct, "objc_object"); break; case BuiltinType::ObjCClass: - mangleArtificialTagType(TTK_Struct, "objc_class"); + mangleArtificialTagType(TagTypeKind::Struct, "objc_class"); break; case BuiltinType::ObjCSel: - mangleArtificialTagType(TTK_Struct, "objc_selector"); + mangleArtificialTagType(TagTypeKind::Struct, "objc_selector"); break; #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ @@ -2397,27 +2652,27 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers, #include "clang/Basic/OpenCLImageTypes.def" case BuiltinType::OCLSampler: Out << "PA"; - mangleArtificialTagType(TTK_Struct, "ocl_sampler"); + mangleArtificialTagType(TagTypeKind::Struct, "ocl_sampler"); break; case BuiltinType::OCLEvent: Out << "PA"; - mangleArtificialTagType(TTK_Struct, "ocl_event"); + mangleArtificialTagType(TagTypeKind::Struct, "ocl_event"); break; case BuiltinType::OCLClkEvent: Out << "PA"; - mangleArtificialTagType(TTK_Struct, "ocl_clkevent"); + mangleArtificialTagType(TagTypeKind::Struct, "ocl_clkevent"); break; case BuiltinType::OCLQueue: Out << "PA"; - mangleArtificialTagType(TTK_Struct, "ocl_queue"); + mangleArtificialTagType(TagTypeKind::Struct, "ocl_queue"); break; case BuiltinType::OCLReserveID: Out << "PA"; - mangleArtificialTagType(TTK_Struct, "ocl_reserveid"); + mangleArtificialTagType(TagTypeKind::Struct, "ocl_reserveid"); break; -#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ - case BuiltinType::Id: \ - mangleArtificialTagType(TTK_Struct, "ocl_" #ExtType); \ +#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ + case BuiltinType::Id: \ + mangleArtificialTagType(TagTypeKind::Struct, "ocl_" #ExtType); \ break; #include "clang/Basic/OpenCLExtensionTypes.def" @@ -2426,13 +2681,29 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers, break; case BuiltinType::Float16: - mangleArtificialTagType(TTK_Struct, "_Float16", {"__clang"}); + mangleArtificialTagType(TagTypeKind::Struct, "_Float16", {"__clang"}); break; case BuiltinType::Half: - mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"}); + if (!getASTContext().getLangOpts().HLSL) + mangleArtificialTagType(TagTypeKind::Struct, "_Half", {"__clang"}); + else if (getASTContext().getLangOpts().NativeHalfType) + Out << "$f16@"; + else + Out << "$halff@"; + break; + + case BuiltinType::BFloat16: + mangleArtificialTagType(TagTypeKind::Struct, "__bf16", {"__clang"}); break; +#define WASM_REF_TYPE(InternalName, MangledName, Id, SingletonId, AS) \ + case BuiltinType::Id: \ + mangleArtificialTagType(TagTypeKind::Struct, MangledName); \ + 
mangleArtificialTagType(TagTypeKind::Struct, MangledName, {"__clang"}); \ + break; + +#include "clang/Basic/WebAssemblyReferenceTypes.def" #define SVE_TYPE(Name, Id, SingletonId) \ case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" @@ -2441,6 +2712,8 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers, #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" case BuiltinType::ShortAccum: case BuiltinType::Accum: case BuiltinType::LongAccum: @@ -2465,7 +2738,7 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers, case BuiltinType::SatUShortFract: case BuiltinType::SatUFract: case BuiltinType::SatULongFract: - case BuiltinType::BFloat16: + case BuiltinType::Ibm128: case BuiltinType::Float128: { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID( @@ -2514,7 +2787,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(D)) { if (MD->getParent()->isLambda()) IsInLambda = true; - if (MD->isInstance()) + if (MD->isImplicitObjectMemberFunction()) HasThisQuals = true; if (isa<CXXDestructorDecl>(MD)) { IsStructor = true; @@ -2538,7 +2811,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, mangleQualifiers(Quals, /*IsMember=*/false); } - mangleCallingConvention(CC); + mangleCallingConvention(CC, Range); // <return-type> ::= <type> // ::= @ # structors (they have no declared return type) @@ -2568,7 +2841,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, // Copy constructor closure always takes an unqualified reference. mangleFunctionArgumentType(getASTContext().getLValueReferenceType( Proto->getParamType(0) - ->getAs<LValueReferenceType>() + ->castAs<LValueReferenceType>() ->getPointeeType(), /*SpelledAsLValue=*/true), Range); @@ -2580,7 +2853,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, return; } Out << '@'; - } else if (IsInLambda && D && isa<CXXConversionDecl>(D)) { + } else if (IsInLambda && isa_and_nonnull<CXXConversionDecl>(D)) { // The only lambda conversion operators are to function pointers, which // can differ by their calling convention and are typically deduced. So // we make sure that this type gets mangled properly. @@ -2623,6 +2896,10 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, } else { // Happens for function pointer type arguments for example. for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) { + // Explicit object parameters are prefixed by "_V". 
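  // Illustrative aside (hedged, not taken from this patch): an explicit object
  // parameter is the C++23 "deducing this" form, e.g. a member declared as
  //   void draw(this Widget self, int depth);
  // where `Widget`, `draw`, and `depth` are hypothetical names used only for
  // illustration. The added code below emits the "_V" marker before mangling
  // such a first parameter, so the explicit object parameter remains
  // distinguishable in the mangled signature.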
+ if (I == 0 && D && D->getParamDecl(I)->isExplicitObjectParameter()) + Out << "_V"; + mangleFunctionArgumentType(Proto->getParamType(I), Range); // Mangle each pass_object_size parameter as if it's a parameter of enum // type passed directly after the parameter with the pass_object_size @@ -2688,7 +2965,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) { case AS_none: llvm_unreachable("Unsupported access specifier"); case AS_private: - if (MD->isStatic()) + if (!MD->isImplicitObjectMemberFunction()) Out << 'C'; else if (IsVirtual) Out << 'E'; @@ -2696,7 +2973,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) { Out << 'A'; break; case AS_protected: - if (MD->isStatic()) + if (!MD->isImplicitObjectMemberFunction()) Out << 'K'; else if (IsVirtual) Out << 'M'; @@ -2704,7 +2981,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) { Out << 'I'; break; case AS_public: - if (MD->isStatic()) + if (!MD->isImplicitObjectMemberFunction()) Out << 'S'; else if (IsVirtual) Out << 'U'; @@ -2715,7 +2992,8 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) { Out << 'Y'; } } -void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) { +void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC, + SourceRange Range) { // <calling-convention> ::= A # __cdecl // ::= B # __export __cdecl // ::= C # __pascal @@ -2728,9 +3006,13 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) { // ::= J # __export __fastcall // ::= Q # __vectorcall // ::= S # __attribute__((__swiftcall__)) // Clang-only - // ::= T # __attribute__((__swiftasynccall__)) + // ::= W # __attribute__((__swiftasynccall__)) + // ::= U # __attribute__((__preserve_most__)) + // ::= V # __attribute__((__preserve_none__)) // + // Clang-only // // Clang-only // ::= w # __regcall + // ::= x # __regcall4 // The 'export' calling conventions are from a bygone era // (*cough*Win16*cough*) when functions were declared for export with // that keyword. 
(It didn't actually export them, it just made them so @@ -2739,23 +3021,55 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) { switch (CC) { default: - llvm_unreachable("Unsupported CC for mangling"); + break; case CC_Win64: case CC_X86_64SysV: - case CC_C: Out << 'A'; break; - case CC_X86Pascal: Out << 'C'; break; - case CC_X86ThisCall: Out << 'E'; break; - case CC_X86StdCall: Out << 'G'; break; - case CC_X86FastCall: Out << 'I'; break; - case CC_X86VectorCall: Out << 'Q'; break; - case CC_Swift: Out << 'S'; break; - case CC_SwiftAsync: Out << 'W'; break; - case CC_PreserveMost: Out << 'U'; break; - case CC_X86RegCall: Out << 'w'; break; + case CC_C: + Out << 'A'; + return; + case CC_X86Pascal: + Out << 'C'; + return; + case CC_X86ThisCall: + Out << 'E'; + return; + case CC_X86StdCall: + Out << 'G'; + return; + case CC_X86FastCall: + Out << 'I'; + return; + case CC_X86VectorCall: + Out << 'Q'; + return; + case CC_Swift: + Out << 'S'; + return; + case CC_SwiftAsync: + Out << 'W'; + return; + case CC_PreserveMost: + Out << 'U'; + return; + case CC_PreserveNone: + Out << 'V'; + return; + case CC_X86RegCall: + if (getASTContext().getLangOpts().RegCall4) + Out << "x"; + else + Out << "w"; + return; } + + DiagnosticsEngine &Diags = Context.getDiags(); + unsigned DiagID = Diags.getCustomDiagID( + DiagnosticsEngine::Error, "cannot mangle this calling convention yet"); + Diags.Report(Range.getBegin(), DiagID) << Range; } -void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) { - mangleCallingConvention(T->getCallConv()); +void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T, + SourceRange Range) { + mangleCallingConvention(T->getCallConv(), Range); } void MicrosoftCXXNameMangler::mangleThrowSpecification( @@ -2786,19 +3100,19 @@ void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T, // <enum-type> ::= W4 <name> void MicrosoftCXXNameMangler::mangleTagTypeKind(TagTypeKind TTK) { switch (TTK) { - case TTK_Union: - Out << 'T'; - break; - case TTK_Struct: - case TTK_Interface: - Out << 'U'; - break; - case TTK_Class: - Out << 'V'; - break; - case TTK_Enum: - Out << "W4"; - break; + case TagTypeKind::Union: + Out << 'T'; + break; + case TagTypeKind::Struct: + case TagTypeKind::Interface: + Out << 'U'; + break; + case TagTypeKind::Class: + Out << 'V'; + break; + case TagTypeKind::Enum: + Out << "W4"; + break; } } void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers, @@ -2824,8 +3138,8 @@ void MicrosoftCXXNameMangler::mangleArtificialTagType( // Always start with the unqualified name. mangleSourceName(UnqualifiedName); - for (auto I = NestedNames.rbegin(), E = NestedNames.rend(); I != E; ++I) - mangleSourceName(*I); + for (StringRef N : llvm::reverse(NestedNames)) + mangleSourceName(N); // Terminate the whole name with an '@'. 
Out << '@'; @@ -2901,6 +3215,11 @@ void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) { mangleType(ElementTy, SourceRange(), QMM_Escape); } +void MicrosoftCXXNameMangler::mangleType(const ArrayParameterType *T, + Qualifiers, SourceRange) { + mangleArrayType(cast<ConstantArrayType>(T)); +} + // <type> ::= <pointer-to-member-type> // <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> // <class name> <type> @@ -3008,11 +3327,11 @@ void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers, Extra.mangleSourceName("_Complex"); Extra.mangleType(ElementType, Range, QMM_Escape); - mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"}); + mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"}); } // Returns true for types that mangleArtificialTagType() gets called for with -// TTK_Union, TTK_Struct, TTK_Class and where compatibility with MSVC's +// TagTypeKind Union, Struct, Class and where compatibility with MSVC's // mangling matters. // (It doesn't matter for Objective-C types and the like that cl.exe doesn't // support.) @@ -3033,23 +3352,29 @@ bool MicrosoftCXXNameMangler::isArtificialTagType(QualType T) const { void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals, SourceRange Range) { - const BuiltinType *ET = T->getElementType()->getAs<BuiltinType>(); - assert(ET && "vectors with non-builtin elements are unsupported"); + QualType EltTy = T->getElementType(); + const BuiltinType *ET = EltTy->getAs<BuiltinType>(); + const BitIntType *BitIntTy = EltTy->getAs<BitIntType>(); + assert((ET || BitIntTy) && + "vectors with non-builtin/_BitInt elements are unsupported"); uint64_t Width = getASTContext().getTypeSize(T); // Pattern match exactly the typedefs in our intrinsic headers. Anything that // doesn't match the Intel types uses a custom mangling below. size_t OutSizeBefore = Out.tell(); if (!isa<ExtVectorType>(T)) { - if (getASTContext().getTargetInfo().getTriple().isX86()) { + if (getASTContext().getTargetInfo().getTriple().isX86() && ET) { if (Width == 64 && ET->getKind() == BuiltinType::LongLong) { - mangleArtificialTagType(TTK_Union, "__m64"); + mangleArtificialTagType(TagTypeKind::Union, "__m64"); } else if (Width >= 128) { if (ET->getKind() == BuiltinType::Float) - mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width)); + mangleArtificialTagType(TagTypeKind::Union, + "__m" + llvm::utostr(Width)); else if (ET->getKind() == BuiltinType::LongLong) - mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i'); + mangleArtificialTagType(TagTypeKind::Union, + "__m" + llvm::utostr(Width) + 'i'); else if (ET->getKind() == BuiltinType::Double) - mangleArtificialTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd'); + mangleArtificialTagType(TagTypeKind::Struct, + "__m" + llvm::utostr(Width) + 'd'); } } } @@ -3065,10 +3390,11 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals, MicrosoftCXXNameMangler Extra(Context, Stream); Stream << "?$"; Extra.mangleSourceName("__vector"); - Extra.mangleType(QualType(ET, 0), Range, QMM_Escape); + Extra.mangleType(QualType(ET ? 
static_cast<const Type *>(ET) : BitIntTy, 0), + Range, QMM_Escape); Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements())); - mangleArtificialTagType(TTK_Union, TemplateMangling, {"__clang"}); + mangleArtificialTagType(TagTypeKind::Union, TemplateMangling, {"__clang"}); } } @@ -3124,7 +3450,7 @@ void MicrosoftCXXNameMangler::mangleType(const DependentAddressSpaceType *T, void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers, SourceRange) { // ObjC interfaces have structs underlying them. - mangleTagTypeKind(TTK_Struct); + mangleTagTypeKind(TagTypeKind::Struct); mangleName(T->getDecl()); } @@ -3144,7 +3470,7 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, TemplateArgBackReferences.swap(OuterTemplateArgsContext); NameBackReferences.swap(OuterTemplateContext); - mangleTagTypeKind(TTK_Struct); + mangleTagTypeKind(TagTypeKind::Struct); Out << "?$"; if (T->isObjCId()) @@ -3223,6 +3549,12 @@ void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, Qualifiers, << Range; } +void MicrosoftCXXNameMangler::mangleType(const PackIndexingType *T, + Qualifiers Quals, SourceRange Range) { + manglePointerCVQualifiers(Quals); + mangleType(T->getSelectedType(), Range); +} + void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); @@ -3292,7 +3624,7 @@ void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers, Extra.mangleSourceName("_Atomic"); Extra.mangleType(ValueType, Range, QMM_Escape); - mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"}); + mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"}); } void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers, @@ -3307,7 +3639,7 @@ void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers, Extra.mangleType(ElementType, Range, QMM_Escape); Extra.mangleIntegerLiteral(llvm::APSInt::get(T->isReadOnly())); - mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"}); + mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"}); } void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD, @@ -3322,39 +3654,39 @@ void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD, if (auto *CD = dyn_cast<CXXConstructorDecl>(D)) { auto Type = GD.getCtorType(); MicrosoftCXXNameMangler mangler(*this, MHO, CD, Type); - return mangler.mangle(D); + return mangler.mangle(GD); } if (auto *DD = dyn_cast<CXXDestructorDecl>(D)) { auto Type = GD.getDtorType(); MicrosoftCXXNameMangler mangler(*this, MHO, DD, Type); - return mangler.mangle(D); + return mangler.mangle(GD); } MicrosoftCXXNameMangler Mangler(*this, MHO); - return Mangler.mangle(D); + return Mangler.mangle(GD); } -void MicrosoftCXXNameMangler::mangleType(const ExtIntType *T, Qualifiers, +void MicrosoftCXXNameMangler::mangleType(const BitIntType *T, Qualifiers, SourceRange Range) { llvm::SmallString<64> TemplateMangling; llvm::raw_svector_ostream Stream(TemplateMangling); MicrosoftCXXNameMangler Extra(Context, Stream); Stream << "?$"; if (T->isUnsigned()) - Extra.mangleSourceName("_UExtInt"); + Extra.mangleSourceName("_UBitInt"); else - Extra.mangleSourceName("_ExtInt"); + Extra.mangleSourceName("_BitInt"); Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumBits())); - mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"}); + mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"}); } -void 
MicrosoftCXXNameMangler::mangleType(const DependentExtIntType *T, +void MicrosoftCXXNameMangler::mangleType(const DependentBitIntType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID( - DiagnosticsEngine::Error, "cannot mangle this DependentExtInt type yet"); + DiagnosticsEngine::Error, "cannot mangle this DependentBitInt type yet"); Diags.Report(Range.getBegin(), DiagID) << Range; } @@ -3457,6 +3789,7 @@ void MicrosoftMangleContextImpl::mangleVirtualMemPtrThunk( void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk, + bool /*ElideOverrideInfo*/, raw_ostream &Out) { msvc_hashing_ostream MHO(Out); MicrosoftCXXNameMangler Mangler(*this, MHO); @@ -3478,9 +3811,11 @@ void MicrosoftMangleContextImpl::mangleThunk(const CXXMethodDecl *MD, DeclForFPT->getType()->castAs<FunctionProtoType>(), MD); } -void MicrosoftMangleContextImpl::mangleCXXDtorThunk( - const CXXDestructorDecl *DD, CXXDtorType Type, - const ThisAdjustment &Adjustment, raw_ostream &Out) { +void MicrosoftMangleContextImpl::mangleCXXDtorThunk(const CXXDestructorDecl *DD, + CXXDtorType Type, + const ThunkInfo &Thunk, + bool /*ElideOverrideInfo*/, + raw_ostream &Out) { // FIXME: Actually, the dtor thunk should be emitted for vector deleting // dtors rather than scalar deleting dtors. Just use the vector deleting dtor // mangling manually until we support both deleting dtor types. @@ -3489,6 +3824,7 @@ void MicrosoftMangleContextImpl::mangleCXXDtorThunk( MicrosoftCXXNameMangler Mangler(*this, MHO, DD, Type); Mangler.getStream() << "??_E"; Mangler.mangleName(DD->getParent()); + auto &Adjustment = Thunk.This; mangleThunkThisAdjustment(DD->getAccess(), Adjustment, Mangler, MHO); Mangler.mangleFunctionType(DD->getType()->castAs<FunctionProtoType>(), DD); } @@ -3513,6 +3849,12 @@ void MicrosoftMangleContextImpl::mangleCXXVFTable( Mangler.getStream() << '@'; } +void MicrosoftMangleContextImpl::mangleCXXVTable(const CXXRecordDecl *Derived, + raw_ostream &Out) { + // TODO: Determine appropriate mangling for MSABI + mangleCXXVFTable(Derived, {}, Out); +} + void MicrosoftMangleContextImpl::mangleCXXVBTable( const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath, raw_ostream &Out) { @@ -3538,8 +3880,8 @@ void MicrosoftMangleContextImpl::mangleCXXRTTI(QualType T, raw_ostream &Out) { Mangler.getStream() << "@8"; } -void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T, - raw_ostream &Out) { +void MicrosoftMangleContextImpl::mangleCXXRTTIName( + QualType T, raw_ostream &Out, bool NormalizeIntegers = false) { MicrosoftCXXNameMangler Mangler(*this, Out); Mangler.getStream() << '.'; Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result); @@ -3602,7 +3944,7 @@ void MicrosoftMangleContextImpl::mangleCXXCatchableType( // FIXME: It is known that the Ctor is present in 2013, and in 2017.7 // (_MSC_VER 1914) and newer, and that it's omitted in 2015 and 2017.4 // (_MSC_VER 1911), but it's unknown when exactly it reappeared (1914? - // Or 1912, 1913 aleady?). + // Or 1912, 1913 already?). 
bool OmitCopyCtor = getASTContext().getLangOpts().isCompatibleWithMSVC( LangOptions::MSVC2015) && !getASTContext().getLangOpts().isCompatibleWithMSVC( @@ -3670,20 +4012,20 @@ void MicrosoftMangleContextImpl::mangleCXXRTTICompleteObjectLocator( llvm::raw_svector_ostream Stream(VFTableMangling); mangleCXXVFTable(Derived, BasePath, Stream); - if (VFTableMangling.startswith("??@")) { - assert(VFTableMangling.endswith("@")); + if (VFTableMangling.starts_with("??@")) { + assert(VFTableMangling.ends_with("@")); Out << VFTableMangling << "??_R4@"; return; } - assert(VFTableMangling.startswith("??_7") || - VFTableMangling.startswith("??_S")); + assert(VFTableMangling.starts_with("??_7") || + VFTableMangling.starts_with("??_S")); Out << "??_R4" << VFTableMangling.str().drop_front(4); } void MicrosoftMangleContextImpl::mangleSEHFilterExpression( - const NamedDecl *EnclosingDecl, raw_ostream &Out) { + GlobalDecl EnclosingDecl, raw_ostream &Out) { msvc_hashing_ostream MHO(Out); MicrosoftCXXNameMangler Mangler(*this, MHO); // The function body is in the same comdat as the function with the handler, @@ -3695,7 +4037,7 @@ void MicrosoftMangleContextImpl::mangleSEHFilterExpression( } void MicrosoftMangleContextImpl::mangleSEHFinallyBlock( - const NamedDecl *EnclosingDecl, raw_ostream &Out) { + GlobalDecl EnclosingDecl, raw_ostream &Out) { msvc_hashing_ostream MHO(Out); MicrosoftCXXNameMangler Mangler(*this, MHO); // The function body is in the same comdat as the function with the handler, @@ -3706,12 +4048,13 @@ void MicrosoftMangleContextImpl::mangleSEHFinallyBlock( Mangler.mangleName(EnclosingDecl); } -void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) { +void MicrosoftMangleContextImpl::mangleCanonicalTypeName( + QualType T, raw_ostream &Out, bool NormalizeIntegers = false) { // This is just a made up unique string for the purposes of tbaa. undname // does *not* know how to demangle it. MicrosoftCXXNameMangler Mangler(*this, Out); Mangler.getStream() << '?'; - Mangler.mangleType(T, SourceRange()); + Mangler.mangleType(T.getCanonicalType(), SourceRange()); } void MicrosoftMangleContextImpl::mangleReferenceTemporary( @@ -3719,7 +4062,8 @@ void MicrosoftMangleContextImpl::mangleReferenceTemporary( msvc_hashing_ostream MHO(Out); MicrosoftCXXNameMangler Mangler(*this, MHO); - Mangler.getStream() << "?$RT" << ManglingNumber << '@'; + Mangler.getStream() << "?"; + Mangler.mangleSourceName("$RT" + llvm::utostr(ManglingNumber)); Mangler.mangle(VD, ""); } @@ -3728,7 +4072,8 @@ void MicrosoftMangleContextImpl::mangleThreadSafeStaticGuardVariable( msvc_hashing_ostream MHO(Out); MicrosoftCXXNameMangler Mangler(*this, MHO); - Mangler.getStream() << "?$TSS" << GuardNum << '@'; + Mangler.getStream() << "?"; + Mangler.mangleSourceName("$TSS" + llvm::utostr(GuardNum)); Mangler.mangleNestedName(VD); Mangler.getStream() << "@4HA"; } @@ -3829,10 +4174,8 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL, // char bar[42] = "foobar"; // Where it is truncated or zero-padded to fit the array. This is the length // used for mangling, and any trailing null-bytes also need to be mangled. - unsigned StringLength = getASTContext() - .getAsConstantArrayType(SL->getType()) - ->getSize() - .getZExtValue(); + unsigned StringLength = + getASTContext().getAsConstantArrayType(SL->getType())->getZExtSize(); unsigned StringByteLength = StringLength * SL->getCharByteWidth(); // <char-type>: The "kind" of string literal is encoded into the mangled name. 
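A minimal sketch of the length rule described in the hunk above, assuming a StringLiteral `SL` for `char bar[42] = "foobar";` and an ASTContext `Ctx` are in scope (illustrative names, not part of the patch):

    // The mangled length comes from the array extent, not from strlen(),
    // so trailing zero-padding bytes are mangled along with the characters.
    const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(SL->getType());
    unsigned StringLength = CAT->getZExtSize();                    // 42, not 6
    unsigned StringByteLength = StringLength * SL->getCharByteWidth(); // 42 * 1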
@@ -3883,7 +4226,7 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL, // - ?[A-Z]: The range from \xc1 to \xda. // - ?[0-9]: The set of [,/\:. \n\t'-]. // - ?$XX: A fallback which maps nibbles. - if (isIdentifierBody(Byte, /*AllowDollar=*/true)) { + if (isAsciiIdentifierContinue(Byte, /*AllowDollar=*/true)) { Mangler.getStream() << Byte; } else if (isLetter(Byte & 0x7f)) { Mangler.getStream() << '?' << static_cast<char>(Byte & 0x7f); @@ -3914,7 +4257,8 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL, Mangler.getStream() << '@'; } -MicrosoftMangleContext * -MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) { - return new MicrosoftMangleContextImpl(Context, Diags); +MicrosoftMangleContext *MicrosoftMangleContext::create(ASTContext &Context, + DiagnosticsEngine &Diags, + bool IsAux) { + return new MicrosoftMangleContextImpl(Context, Diags, IsAux); } diff --git a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp index 861060d7c875..48d1763125e6 100644 --- a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp +++ b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp @@ -11,6 +11,7 @@ #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "llvm/ADT/StringSwitch.h" +#include <optional> using namespace clang; @@ -55,10 +56,8 @@ Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const { &Ctx.Idents.get("initWithUTF8String")); break; case NSStr_stringWithCStringEncoding: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("stringWithCString"), - &Ctx.Idents.get("encoding") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("stringWithCString"), + &Ctx.Idents.get("encoding")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -92,10 +91,8 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects")); break; case NSArr_arrayWithObjectsCount: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("arrayWithObjects"), - &Ctx.Idents.get("count") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("arrayWithObjects"), + &Ctx.Idents.get("count")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -109,10 +106,9 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex")); break; case NSMutableArr_replaceObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("replaceObjectAtIndex"), - &Ctx.Idents.get("withObject") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("replaceObjectAtIndex"), + &Ctx.Idents.get("withObject")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -120,18 +116,14 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject")); break; case NSMutableArr_insertObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("insertObject"), - &Ctx.Idents.get("atIndex") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("insertObject"), + &Ctx.Idents.get("atIndex")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSMutableArr_setObjectAtIndexedSubscript: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("atIndexedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), &Ctx.Idents.get("atIndexedSubscript")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } 
@@ -142,14 +134,15 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { return NSArraySelectors[MK]; } -Optional<NSAPI::NSArrayMethodKind> NSAPI::getNSArrayMethodKind(Selector Sel) { +std::optional<NSAPI::NSArrayMethodKind> +NSAPI::getNSArrayMethodKind(Selector Sel) { for (unsigned i = 0; i != NumNSArrayMethods; ++i) { NSArrayMethodKind MK = NSArrayMethodKind(i); if (Sel == getNSArraySelector(MK)) return MK; } - return None; + return std::nullopt; } Selector NSAPI::getNSDictionarySelector( @@ -165,27 +158,21 @@ Selector NSAPI::getNSDictionarySelector( &Ctx.Idents.get("dictionaryWithDictionary")); break; case NSDict_dictionaryWithObjectForKey: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("dictionaryWithObject"), - &Ctx.Idents.get("forKey") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObject"), &Ctx.Idents.get("forKey")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSDict_dictionaryWithObjectsForKeys: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("dictionaryWithObjects"), - &Ctx.Idents.get("forKeys") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObjects"), &Ctx.Idents.get("forKeys")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSDict_dictionaryWithObjectsForKeysCount: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("dictionaryWithObjects"), - &Ctx.Idents.get("forKeys"), - &Ctx.Idents.get("count") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("dictionaryWithObjects"), &Ctx.Idents.get("forKeys"), + &Ctx.Idents.get("count")}; Sel = Ctx.Selectors.getSelector(3, KeyIdents); break; } @@ -202,10 +189,8 @@ Selector NSAPI::getNSDictionarySelector( &Ctx.Idents.get("initWithObjectsAndKeys")); break; case NSDict_initWithObjectsForKeys: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("initWithObjects"), - &Ctx.Idents.get("forKeys") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("initWithObjects"), + &Ctx.Idents.get("forKeys")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -213,26 +198,20 @@ Selector NSAPI::getNSDictionarySelector( Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey")); break; case NSMutableDict_setObjectForKey: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("forKey") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setObject"), + &Ctx.Idents.get("forKey")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSMutableDict_setObjectForKeyedSubscript: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("forKeyedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), &Ctx.Idents.get("forKeyedSubscript")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSMutableDict_setValueForKey: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setValue"), - &Ctx.Idents.get("forKey") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setValue"), + &Ctx.Idents.get("forKey")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -243,7 +222,7 @@ Selector NSAPI::getNSDictionarySelector( return NSDictionarySelectors[MK]; } -Optional<NSAPI::NSDictionaryMethodKind> +std::optional<NSAPI::NSDictionaryMethodKind> NSAPI::getNSDictionaryMethodKind(Selector Sel) { for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) { NSDictionaryMethodKind MK = NSDictionaryMethodKind(i); @@ -251,7 +230,7 @@ NSAPI::getNSDictionaryMethodKind(Selector Sel) { return MK; } - 
return None; + return std::nullopt; } Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const { @@ -262,34 +241,27 @@ Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const { Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject")); break; case NSOrderedSet_insertObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("insertObject"), - &Ctx.Idents.get("atIndex") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("insertObject"), + &Ctx.Idents.get("atIndex")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSOrderedSet_setObjectAtIndex: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("atIndex") - }; + const IdentifierInfo *KeyIdents[] = {&Ctx.Idents.get("setObject"), + &Ctx.Idents.get("atIndex")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSOrderedSet_setObjectAtIndexedSubscript: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("setObject"), - &Ctx.Idents.get("atIndexedSubscript") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), &Ctx.Idents.get("atIndexedSubscript")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } case NSOrderedSet_replaceObjectAtIndexWithObject: { - IdentifierInfo *KeyIdents[] = { - &Ctx.Idents.get("replaceObjectAtIndex"), - &Ctx.Idents.get("withObject") - }; + const IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("replaceObjectAtIndex"), + &Ctx.Idents.get("withObject")}; Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } @@ -300,15 +272,14 @@ Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const { return NSSetSelectors[MK]; } -Optional<NSAPI::NSSetMethodKind> -NSAPI::getNSSetMethodKind(Selector Sel) { +std::optional<NSAPI::NSSetMethodKind> NSAPI::getNSSetMethodKind(Selector Sel) { for (unsigned i = 0; i != NumNSSetMethods; ++i) { NSSetMethodKind MK = NSSetMethodKind(i); if (Sel == getNSSetSelector(MK)) return MK; } - return None; + return std::nullopt; } Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK, @@ -363,7 +334,7 @@ Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK, return Sels[MK]; } -Optional<NSAPI::NSNumberLiteralMethodKind> +std::optional<NSAPI::NSNumberLiteralMethodKind> NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const { for (unsigned i = 0; i != NumNSNumberLiteralMethods; ++i) { NSNumberLiteralMethodKind MK = NSNumberLiteralMethodKind(i); @@ -371,14 +342,14 @@ NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const { return MK; } - return None; + return std::nullopt; } -Optional<NSAPI::NSNumberLiteralMethodKind> +std::optional<NSAPI::NSNumberLiteralMethodKind> NSAPI::getNSNumberFactoryMethodKind(QualType T) const { const BuiltinType *BT = T->getAs<BuiltinType>(); if (!BT) - return None; + return std::nullopt; const TypedefType *TDT = T->getAs<TypedefType>(); if (TDT) { @@ -456,6 +427,7 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const { case BuiltinType::UInt128: case BuiltinType::Float16: case BuiltinType::Float128: + case BuiltinType::Ibm128: case BuiltinType::NullPtr: case BuiltinType::ObjCClass: case BuiltinType::ObjCId: @@ -479,7 +451,12 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const { #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include 
"clang/Basic/AMDGPUTypes.def" case BuiltinType::BoundMember: + case BuiltinType::UnresolvedTemplate: case BuiltinType::Dependent: case BuiltinType::Overload: case BuiltinType::UnknownAny: @@ -488,14 +465,14 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const { case BuiltinType::PseudoObject: case BuiltinType::BuiltinFn: case BuiltinType::IncompleteMatrixIdx: - case BuiltinType::OMPArraySection: + case BuiltinType::ArraySection: case BuiltinType::OMPArrayShaping: case BuiltinType::OMPIterator: case BuiltinType::BFloat16: break; } - return None; + return std::nullopt; } /// Returns true if \param T is a typedef of "BOOL" in objective-c. @@ -602,7 +579,7 @@ bool NSAPI::isObjCEnumerator(const Expr *E, Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids, Selector &Sel) const { if (Sel.isNull()) { - SmallVector<IdentifierInfo *, 4> Idents; + SmallVector<const IdentifierInfo *, 4> Idents; for (ArrayRef<StringRef>::const_iterator I = Ids.begin(), E = Ids.end(); I != E; ++I) Idents.push_back(&Ctx.Idents.get(*I)); @@ -613,7 +590,7 @@ Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids, Selector NSAPI::getOrInitNullarySelector(StringRef Id, Selector &Sel) const { if (Sel.isNull()) { - IdentifierInfo *Ident = &Ctx.Idents.get(Id); + const IdentifierInfo *Ident = &Ctx.Idents.get(Id); Sel = Ctx.Selectors.getSelector(0, &Ident); } return Sel; diff --git a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp index 21afdd1570f4..785c46e86a77 100644 --- a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp +++ b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp @@ -55,16 +55,16 @@ NestedNameSpecifier::FindOrInsert(const ASTContext &Context, return NNS; } -NestedNameSpecifier * -NestedNameSpecifier::Create(const ASTContext &Context, - NestedNameSpecifier *Prefix, IdentifierInfo *II) { +NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context, + NestedNameSpecifier *Prefix, + const IdentifierInfo *II) { assert(II && "Identifier cannot be NULL"); assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent"); NestedNameSpecifier Mockup; Mockup.Prefix.setPointer(Prefix); Mockup.Prefix.setInt(StoredIdentifier); - Mockup.Specifier = II; + Mockup.Specifier = const_cast<IdentifierInfo *>(II); return FindOrInsert(Context, Mockup); } @@ -87,7 +87,7 @@ NestedNameSpecifier::Create(const ASTContext &Context, NestedNameSpecifier * NestedNameSpecifier::Create(const ASTContext &Context, NestedNameSpecifier *Prefix, - NamespaceAliasDecl *Alias) { + const NamespaceAliasDecl *Alias) { assert(Alias && "Namespace alias cannot be NULL"); assert((!Prefix || (Prefix->getAsType() == nullptr && @@ -96,7 +96,7 @@ NestedNameSpecifier::Create(const ASTContext &Context, NestedNameSpecifier Mockup; Mockup.Prefix.setPointer(Prefix); Mockup.Prefix.setInt(StoredDecl); - Mockup.Specifier = Alias; + Mockup.Specifier = const_cast<NamespaceAliasDecl *>(Alias); return FindOrInsert(Context, Mockup); } @@ -112,13 +112,13 @@ NestedNameSpecifier::Create(const ASTContext &Context, return FindOrInsert(Context, Mockup); } -NestedNameSpecifier * -NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) { +NestedNameSpecifier *NestedNameSpecifier::Create(const ASTContext &Context, + const IdentifierInfo *II) { assert(II && "Identifier cannot be NULL"); NestedNameSpecifier Mockup; Mockup.Prefix.setPointer(nullptr); Mockup.Prefix.setInt(StoredIdentifier); - Mockup.Specifier = II; + Mockup.Specifier = 
const_cast<IdentifierInfo *>(II); return FindOrInsert(Context, Mockup); } @@ -280,14 +280,14 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy, case TypeSpecWithTemplate: OS << "template "; // Fall through to print the type. - LLVM_FALLTHROUGH; + [[fallthrough]]; case TypeSpec: { const auto *Record = dyn_cast_or_null<ClassTemplateSpecializationDecl>(getAsRecordDecl()); if (ResolveTemplateArguments && Record) { // Print the type trait with resolved template parameters. - Record->printName(OS); + Record->printName(OS, Policy); printTemplateArgumentList( OS, Record->getTemplateArgs().asArray(), Policy, Record->getSpecializedTemplate()->getTemplateParameters()); @@ -311,7 +311,8 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy, = dyn_cast<TemplateSpecializationType>(T)) { // Print the template name without its corresponding // nested-name-specifier. - SpecType->getTemplateName().print(OS, InnerPolicy, true); + SpecType->getTemplateName().print(OS, InnerPolicy, + TemplateName::Qualified::None); // Print the template argument list. printTemplateArgumentList(OS, SpecType->template_arguments(), diff --git a/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp new file mode 100644 index 000000000000..37f0f68c9235 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp @@ -0,0 +1,2218 @@ +//===-- ODRDiagsEmitter.cpp - Diagnostics for ODR mismatches ----*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/ODRDiagsEmitter.h" +#include "clang/AST/DeclFriend.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ODRHash.h" +#include "clang/Basic/DiagnosticAST.h" +#include "clang/Basic/Module.h" + +using namespace clang; + +static unsigned computeODRHash(QualType Ty) { + ODRHash Hasher; + Hasher.AddQualType(Ty); + return Hasher.CalculateHash(); +} + +static unsigned computeODRHash(const Stmt *S) { + ODRHash Hasher; + Hasher.AddStmt(S); + return Hasher.CalculateHash(); +} + +static unsigned computeODRHash(const Decl *D) { + assert(D); + ODRHash Hasher; + Hasher.AddSubDecl(D); + return Hasher.CalculateHash(); +} + +static unsigned computeODRHash(const TemplateArgument &TA) { + ODRHash Hasher; + Hasher.AddTemplateArgument(TA); + return Hasher.CalculateHash(); +} + +std::string ODRDiagsEmitter::getOwningModuleNameForDiagnostic(const Decl *D) { + // If we know the owning module, use it. + if (Module *M = D->getImportedOwningModule()) + return M->getFullModuleName(); + + // Not from a module. 
+ return {}; +} + +template <typename MethodT> +static bool diagnoseSubMismatchMethodParameters(DiagnosticsEngine &Diags, + const NamedDecl *FirstContainer, + StringRef FirstModule, + StringRef SecondModule, + const MethodT *FirstMethod, + const MethodT *SecondMethod) { + enum DiagMethodType { + DiagMethod, + DiagConstructor, + DiagDestructor, + }; + auto GetDiagMethodType = [](const NamedDecl *D) { + if (isa<CXXConstructorDecl>(D)) + return DiagConstructor; + if (isa<CXXDestructorDecl>(D)) + return DiagDestructor; + return DiagMethod; + }; + + enum ODRMethodParametersDifference { + NumberParameters, + ParameterType, + ParameterName, + }; + auto DiagError = [&Diags, &GetDiagMethodType, FirstContainer, FirstModule, + FirstMethod](ODRMethodParametersDifference DiffType) { + DeclarationName FirstName = FirstMethod->getDeclName(); + DiagMethodType FirstMethodType = GetDiagMethodType(FirstMethod); + return Diags.Report(FirstMethod->getLocation(), + diag::err_module_odr_violation_method_params) + << FirstContainer << FirstModule.empty() << FirstModule + << FirstMethod->getSourceRange() << DiffType << FirstMethodType + << FirstName; + }; + auto DiagNote = [&Diags, &GetDiagMethodType, SecondModule, + SecondMethod](ODRMethodParametersDifference DiffType) { + DeclarationName SecondName = SecondMethod->getDeclName(); + DiagMethodType SecondMethodType = GetDiagMethodType(SecondMethod); + return Diags.Report(SecondMethod->getLocation(), + diag::note_module_odr_violation_method_params) + << SecondModule.empty() << SecondModule + << SecondMethod->getSourceRange() << DiffType << SecondMethodType + << SecondName; + }; + + const unsigned FirstNumParameters = FirstMethod->param_size(); + const unsigned SecondNumParameters = SecondMethod->param_size(); + if (FirstNumParameters != SecondNumParameters) { + DiagError(NumberParameters) << FirstNumParameters; + DiagNote(NumberParameters) << SecondNumParameters; + return true; + } + + for (unsigned I = 0; I < FirstNumParameters; ++I) { + const ParmVarDecl *FirstParam = FirstMethod->getParamDecl(I); + const ParmVarDecl *SecondParam = SecondMethod->getParamDecl(I); + + QualType FirstParamType = FirstParam->getType(); + QualType SecondParamType = SecondParam->getType(); + if (FirstParamType != SecondParamType && + computeODRHash(FirstParamType) != computeODRHash(SecondParamType)) { + if (const DecayedType *ParamDecayedType = + FirstParamType->getAs<DecayedType>()) { + DiagError(ParameterType) << (I + 1) << FirstParamType << true + << ParamDecayedType->getOriginalType(); + } else { + DiagError(ParameterType) << (I + 1) << FirstParamType << false; + } + + if (const DecayedType *ParamDecayedType = + SecondParamType->getAs<DecayedType>()) { + DiagNote(ParameterType) << (I + 1) << SecondParamType << true + << ParamDecayedType->getOriginalType(); + } else { + DiagNote(ParameterType) << (I + 1) << SecondParamType << false; + } + return true; + } + + DeclarationName FirstParamName = FirstParam->getDeclName(); + DeclarationName SecondParamName = SecondParam->getDeclName(); + if (FirstParamName != SecondParamName) { + DiagError(ParameterName) << (I + 1) << FirstParamName; + DiagNote(ParameterName) << (I + 1) << SecondParamName; + return true; + } + } + + return false; +} + +bool ODRDiagsEmitter::diagnoseSubMismatchField( + const NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule, + const FieldDecl *FirstField, const FieldDecl *SecondField) const { + enum ODRFieldDifference { + FieldName, + FieldTypeName, + FieldSingleBitField, + FieldDifferentWidthBitField, + 
FieldSingleMutable, + FieldSingleInitializer, + FieldDifferentInitializers, + }; + + auto DiagError = [FirstRecord, FirstField, FirstModule, + this](ODRFieldDifference DiffType) { + return Diag(FirstField->getLocation(), diag::err_module_odr_violation_field) + << FirstRecord << FirstModule.empty() << FirstModule + << FirstField->getSourceRange() << DiffType; + }; + auto DiagNote = [SecondField, SecondModule, + this](ODRFieldDifference DiffType) { + return Diag(SecondField->getLocation(), + diag::note_module_odr_violation_field) + << SecondModule.empty() << SecondModule << SecondField->getSourceRange() << DiffType; + }; + + IdentifierInfo *FirstII = FirstField->getIdentifier(); + IdentifierInfo *SecondII = SecondField->getIdentifier(); + if (FirstII->getName() != SecondII->getName()) { + DiagError(FieldName) << FirstII; + DiagNote(FieldName) << SecondII; + return true; + } + + QualType FirstType = FirstField->getType(); + QualType SecondType = SecondField->getType(); + if (computeODRHash(FirstType) != computeODRHash(SecondType)) { + DiagError(FieldTypeName) << FirstII << FirstType; + DiagNote(FieldTypeName) << SecondII << SecondType; + return true; + } + + assert(Context.hasSameType(FirstField->getType(), SecondField->getType())); + (void)Context; + + const bool IsFirstBitField = FirstField->isBitField(); + const bool IsSecondBitField = SecondField->isBitField(); + if (IsFirstBitField != IsSecondBitField) { + DiagError(FieldSingleBitField) << FirstII << IsFirstBitField; + DiagNote(FieldSingleBitField) << SecondII << IsSecondBitField; + return true; + } + + if (IsFirstBitField && IsSecondBitField) { + unsigned FirstBitWidthHash = computeODRHash(FirstField->getBitWidth()); + unsigned SecondBitWidthHash = computeODRHash(SecondField->getBitWidth()); + if (FirstBitWidthHash != SecondBitWidthHash) { + DiagError(FieldDifferentWidthBitField) + << FirstII << FirstField->getBitWidth()->getSourceRange(); + DiagNote(FieldDifferentWidthBitField) + << SecondII << SecondField->getBitWidth()->getSourceRange(); + return true; + } + } + + if (!LangOpts.CPlusPlus) + return false; + + const bool IsFirstMutable = FirstField->isMutable(); + const bool IsSecondMutable = SecondField->isMutable(); + if (IsFirstMutable != IsSecondMutable) { + DiagError(FieldSingleMutable) << FirstII << IsFirstMutable; + DiagNote(FieldSingleMutable) << SecondII << IsSecondMutable; + return true; + } + + const Expr *FirstInitializer = FirstField->getInClassInitializer(); + const Expr *SecondInitializer = SecondField->getInClassInitializer(); + if ((!FirstInitializer && SecondInitializer) || + (FirstInitializer && !SecondInitializer)) { + DiagError(FieldSingleInitializer) + << FirstII << (FirstInitializer != nullptr); + DiagNote(FieldSingleInitializer) + << SecondII << (SecondInitializer != nullptr); + return true; + } + + if (FirstInitializer && SecondInitializer) { + unsigned FirstInitHash = computeODRHash(FirstInitializer); + unsigned SecondInitHash = computeODRHash(SecondInitializer); + if (FirstInitHash != SecondInitHash) { + DiagError(FieldDifferentInitializers) + << FirstII << FirstInitializer->getSourceRange(); + DiagNote(FieldDifferentInitializers) + << SecondII << SecondInitializer->getSourceRange(); + return true; + } + } + + return false; +} + +bool ODRDiagsEmitter::diagnoseSubMismatchTypedef( + const NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule, + const TypedefNameDecl *FirstTD, const TypedefNameDecl *SecondTD, + bool IsTypeAlias) const { + enum ODRTypedefDifference { + TypedefName, + TypedefType, 
+ }; + + auto DiagError = [FirstRecord, FirstTD, FirstModule, + this](ODRTypedefDifference DiffType) { + return Diag(FirstTD->getLocation(), diag::err_module_odr_violation_typedef) + << FirstRecord << FirstModule.empty() << FirstModule + << FirstTD->getSourceRange() << DiffType; + }; + auto DiagNote = [SecondTD, SecondModule, + this](ODRTypedefDifference DiffType) { + return Diag(SecondTD->getLocation(), + diag::note_module_odr_violation_typedef) + << SecondModule << SecondTD->getSourceRange() << DiffType; + }; + + DeclarationName FirstName = FirstTD->getDeclName(); + DeclarationName SecondName = SecondTD->getDeclName(); + if (FirstName != SecondName) { + DiagError(TypedefName) << IsTypeAlias << FirstName; + DiagNote(TypedefName) << IsTypeAlias << SecondName; + return true; + } + + QualType FirstType = FirstTD->getUnderlyingType(); + QualType SecondType = SecondTD->getUnderlyingType(); + if (computeODRHash(FirstType) != computeODRHash(SecondType)) { + DiagError(TypedefType) << IsTypeAlias << FirstName << FirstType; + DiagNote(TypedefType) << IsTypeAlias << SecondName << SecondType; + return true; + } + return false; +} + +bool ODRDiagsEmitter::diagnoseSubMismatchVar(const NamedDecl *FirstRecord, + StringRef FirstModule, + StringRef SecondModule, + const VarDecl *FirstVD, + const VarDecl *SecondVD) const { + enum ODRVarDifference { + VarName, + VarType, + VarSingleInitializer, + VarDifferentInitializer, + VarConstexpr, + }; + + auto DiagError = [FirstRecord, FirstVD, FirstModule, + this](ODRVarDifference DiffType) { + return Diag(FirstVD->getLocation(), diag::err_module_odr_violation_variable) + << FirstRecord << FirstModule.empty() << FirstModule + << FirstVD->getSourceRange() << DiffType; + }; + auto DiagNote = [SecondVD, SecondModule, this](ODRVarDifference DiffType) { + return Diag(SecondVD->getLocation(), + diag::note_module_odr_violation_variable) + << SecondModule << SecondVD->getSourceRange() << DiffType; + }; + + DeclarationName FirstName = FirstVD->getDeclName(); + DeclarationName SecondName = SecondVD->getDeclName(); + if (FirstName != SecondName) { + DiagError(VarName) << FirstName; + DiagNote(VarName) << SecondName; + return true; + } + + QualType FirstType = FirstVD->getType(); + QualType SecondType = SecondVD->getType(); + if (computeODRHash(FirstType) != computeODRHash(SecondType)) { + DiagError(VarType) << FirstName << FirstType; + DiagNote(VarType) << SecondName << SecondType; + return true; + } + + if (!LangOpts.CPlusPlus) + return false; + + const Expr *FirstInit = FirstVD->getInit(); + const Expr *SecondInit = SecondVD->getInit(); + if ((FirstInit == nullptr) != (SecondInit == nullptr)) { + DiagError(VarSingleInitializer) + << FirstName << (FirstInit == nullptr) + << (FirstInit ? FirstInit->getSourceRange() : SourceRange()); + DiagNote(VarSingleInitializer) + << SecondName << (SecondInit == nullptr) + << (SecondInit ? 
SecondInit->getSourceRange() : SourceRange()); + return true; + } + + if (FirstInit && SecondInit && + computeODRHash(FirstInit) != computeODRHash(SecondInit)) { + DiagError(VarDifferentInitializer) + << FirstName << FirstInit->getSourceRange(); + DiagNote(VarDifferentInitializer) + << SecondName << SecondInit->getSourceRange(); + return true; + } + + const bool FirstIsConstexpr = FirstVD->isConstexpr(); + const bool SecondIsConstexpr = SecondVD->isConstexpr(); + if (FirstIsConstexpr != SecondIsConstexpr) { + DiagError(VarConstexpr) << FirstName << FirstIsConstexpr; + DiagNote(VarConstexpr) << SecondName << SecondIsConstexpr; + return true; + } + return false; +} + +bool ODRDiagsEmitter::diagnoseSubMismatchProtocols( + const ObjCProtocolList &FirstProtocols, + const ObjCContainerDecl *FirstContainer, StringRef FirstModule, + const ObjCProtocolList &SecondProtocols, + const ObjCContainerDecl *SecondContainer, StringRef SecondModule) const { + // Keep in sync with err_module_odr_violation_referenced_protocols. + enum ODRReferencedProtocolDifference { + NumProtocols, + ProtocolType, + }; + auto DiagRefProtocolError = [FirstContainer, FirstModule, + this](SourceLocation Loc, SourceRange Range, + ODRReferencedProtocolDifference DiffType) { + return Diag(Loc, diag::err_module_odr_violation_referenced_protocols) + << FirstContainer << FirstModule.empty() << FirstModule << Range + << DiffType; + }; + auto DiagRefProtocolNote = [SecondModule, + this](SourceLocation Loc, SourceRange Range, + ODRReferencedProtocolDifference DiffType) { + return Diag(Loc, diag::note_module_odr_violation_referenced_protocols) + << SecondModule.empty() << SecondModule << Range << DiffType; + }; + auto GetProtoListSourceRange = [](const ObjCProtocolList &PL) { + if (PL.empty()) + return SourceRange(); + return SourceRange(*PL.loc_begin(), *std::prev(PL.loc_end())); + }; + + if (FirstProtocols.size() != SecondProtocols.size()) { + DiagRefProtocolError(FirstContainer->getLocation(), + GetProtoListSourceRange(FirstProtocols), NumProtocols) + << FirstProtocols.size(); + DiagRefProtocolNote(SecondContainer->getLocation(), + GetProtoListSourceRange(SecondProtocols), NumProtocols) + << SecondProtocols.size(); + return true; + } + + for (unsigned I = 0, E = FirstProtocols.size(); I != E; ++I) { + const ObjCProtocolDecl *FirstProtocol = FirstProtocols[I]; + const ObjCProtocolDecl *SecondProtocol = SecondProtocols[I]; + DeclarationName FirstProtocolName = FirstProtocol->getDeclName(); + DeclarationName SecondProtocolName = SecondProtocol->getDeclName(); + if (FirstProtocolName != SecondProtocolName) { + SourceLocation FirstLoc = *(FirstProtocols.loc_begin() + I); + SourceLocation SecondLoc = *(SecondProtocols.loc_begin() + I); + SourceRange EmptyRange; + DiagRefProtocolError(FirstLoc, EmptyRange, ProtocolType) + << (I + 1) << FirstProtocolName; + DiagRefProtocolNote(SecondLoc, EmptyRange, ProtocolType) + << (I + 1) << SecondProtocolName; + return true; + } + } + + return false; +} + +bool ODRDiagsEmitter::diagnoseSubMismatchObjCMethod( + const NamedDecl *FirstObjCContainer, StringRef FirstModule, + StringRef SecondModule, const ObjCMethodDecl *FirstMethod, + const ObjCMethodDecl *SecondMethod) const { + enum ODRMethodDifference { + ReturnType, + InstanceOrClass, + ControlLevel, // optional/required + DesignatedInitializer, + Directness, + Name, + }; + + auto DiagError = [FirstObjCContainer, FirstModule, FirstMethod, + this](ODRMethodDifference DiffType) { + return Diag(FirstMethod->getLocation(), + 
diag::err_module_odr_violation_objc_method) + << FirstObjCContainer << FirstModule.empty() << FirstModule + << FirstMethod->getSourceRange() << DiffType; + }; + auto DiagNote = [SecondModule, SecondMethod, + this](ODRMethodDifference DiffType) { + return Diag(SecondMethod->getLocation(), + diag::note_module_odr_violation_objc_method) + << SecondModule.empty() << SecondModule + << SecondMethod->getSourceRange() << DiffType; + }; + + if (computeODRHash(FirstMethod->getReturnType()) != + computeODRHash(SecondMethod->getReturnType())) { + DiagError(ReturnType) << FirstMethod << FirstMethod->getReturnType(); + DiagNote(ReturnType) << SecondMethod << SecondMethod->getReturnType(); + return true; + } + + if (FirstMethod->isInstanceMethod() != SecondMethod->isInstanceMethod()) { + DiagError(InstanceOrClass) + << FirstMethod << FirstMethod->isInstanceMethod(); + DiagNote(InstanceOrClass) + << SecondMethod << SecondMethod->isInstanceMethod(); + return true; + } + if (FirstMethod->getImplementationControl() != + SecondMethod->getImplementationControl()) { + DiagError(ControlLevel) + << llvm::to_underlying(FirstMethod->getImplementationControl()); + DiagNote(ControlLevel) << llvm::to_underlying( + SecondMethod->getImplementationControl()); + return true; + } + if (FirstMethod->isThisDeclarationADesignatedInitializer() != + SecondMethod->isThisDeclarationADesignatedInitializer()) { + DiagError(DesignatedInitializer) + << FirstMethod + << FirstMethod->isThisDeclarationADesignatedInitializer(); + DiagNote(DesignatedInitializer) + << SecondMethod + << SecondMethod->isThisDeclarationADesignatedInitializer(); + return true; + } + if (FirstMethod->isDirectMethod() != SecondMethod->isDirectMethod()) { + DiagError(Directness) << FirstMethod << FirstMethod->isDirectMethod(); + DiagNote(Directness) << SecondMethod << SecondMethod->isDirectMethod(); + return true; + } + if (diagnoseSubMismatchMethodParameters(Diags, FirstObjCContainer, + FirstModule, SecondModule, + FirstMethod, SecondMethod)) + return true; + + // Check method name *after* looking at the parameters otherwise we get a + // less ideal diagnostics: a ObjCMethodName mismatch given that selectors + // for different parameters are likely to be different. 
+ DeclarationName FirstName = FirstMethod->getDeclName(); + DeclarationName SecondName = SecondMethod->getDeclName(); + if (FirstName != SecondName) { + DiagError(Name) << FirstName; + DiagNote(Name) << SecondName; + return true; + } + + return false; +} + +bool ODRDiagsEmitter::diagnoseSubMismatchObjCProperty( + const NamedDecl *FirstObjCContainer, StringRef FirstModule, + StringRef SecondModule, const ObjCPropertyDecl *FirstProp, + const ObjCPropertyDecl *SecondProp) const { + enum ODRPropertyDifference { + Name, + Type, + ControlLevel, // optional/required + Attribute, + }; + + auto DiagError = [FirstObjCContainer, FirstModule, FirstProp, + this](SourceLocation Loc, ODRPropertyDifference DiffType) { + return Diag(Loc, diag::err_module_odr_violation_objc_property) + << FirstObjCContainer << FirstModule.empty() << FirstModule + << FirstProp->getSourceRange() << DiffType; + }; + auto DiagNote = [SecondModule, SecondProp, + this](SourceLocation Loc, ODRPropertyDifference DiffType) { + return Diag(Loc, diag::note_module_odr_violation_objc_property) + << SecondModule.empty() << SecondModule + << SecondProp->getSourceRange() << DiffType; + }; + + IdentifierInfo *FirstII = FirstProp->getIdentifier(); + IdentifierInfo *SecondII = SecondProp->getIdentifier(); + if (FirstII->getName() != SecondII->getName()) { + DiagError(FirstProp->getLocation(), Name) << FirstII; + DiagNote(SecondProp->getLocation(), Name) << SecondII; + return true; + } + if (computeODRHash(FirstProp->getType()) != + computeODRHash(SecondProp->getType())) { + DiagError(FirstProp->getLocation(), Type) + << FirstII << FirstProp->getType(); + DiagNote(SecondProp->getLocation(), Type) + << SecondII << SecondProp->getType(); + return true; + } + if (FirstProp->getPropertyImplementation() != + SecondProp->getPropertyImplementation()) { + DiagError(FirstProp->getLocation(), ControlLevel) + << FirstProp->getPropertyImplementation(); + DiagNote(SecondProp->getLocation(), ControlLevel) + << SecondProp->getPropertyImplementation(); + return true; + } + + // Go over the property attributes and stop at the first mismatch. + unsigned FirstAttrs = (unsigned)FirstProp->getPropertyAttributes(); + unsigned SecondAttrs = (unsigned)SecondProp->getPropertyAttributes(); + if (FirstAttrs != SecondAttrs) { + for (unsigned I = 0; I < NumObjCPropertyAttrsBits; ++I) { + unsigned CheckedAttr = (1 << I); + if ((FirstAttrs & CheckedAttr) == (SecondAttrs & CheckedAttr)) + continue; + + bool IsFirstWritten = + (unsigned)FirstProp->getPropertyAttributesAsWritten() & CheckedAttr; + bool IsSecondWritten = + (unsigned)SecondProp->getPropertyAttributesAsWritten() & CheckedAttr; + DiagError(IsFirstWritten ? FirstProp->getLParenLoc() + : FirstProp->getLocation(), + Attribute) + << FirstII << (I + 1) << IsFirstWritten; + DiagNote(IsSecondWritten ? 
SecondProp->getLParenLoc() + : SecondProp->getLocation(), + Attribute) + << SecondII << (I + 1); + return true; + } + } + + return false; +} + +ODRDiagsEmitter::DiffResult +ODRDiagsEmitter::FindTypeDiffs(DeclHashes &FirstHashes, + DeclHashes &SecondHashes) { + auto DifferenceSelector = [](const Decl *D) { + assert(D && "valid Decl required"); + switch (D->getKind()) { + default: + return Other; + case Decl::AccessSpec: + switch (D->getAccess()) { + case AS_public: + return PublicSpecifer; + case AS_private: + return PrivateSpecifer; + case AS_protected: + return ProtectedSpecifer; + case AS_none: + break; + } + llvm_unreachable("Invalid access specifier"); + case Decl::StaticAssert: + return StaticAssert; + case Decl::Field: + return Field; + case Decl::CXXMethod: + case Decl::CXXConstructor: + case Decl::CXXDestructor: + return CXXMethod; + case Decl::TypeAlias: + return TypeAlias; + case Decl::Typedef: + return TypeDef; + case Decl::Var: + return Var; + case Decl::Friend: + return Friend; + case Decl::FunctionTemplate: + return FunctionTemplate; + case Decl::ObjCMethod: + return ObjCMethod; + case Decl::ObjCIvar: + return ObjCIvar; + case Decl::ObjCProperty: + return ObjCProperty; + } + }; + + DiffResult DR; + auto FirstIt = FirstHashes.begin(); + auto SecondIt = SecondHashes.begin(); + while (FirstIt != FirstHashes.end() || SecondIt != SecondHashes.end()) { + if (FirstIt != FirstHashes.end() && SecondIt != SecondHashes.end() && + FirstIt->second == SecondIt->second) { + ++FirstIt; + ++SecondIt; + continue; + } + + DR.FirstDecl = FirstIt == FirstHashes.end() ? nullptr : FirstIt->first; + DR.SecondDecl = SecondIt == SecondHashes.end() ? nullptr : SecondIt->first; + + DR.FirstDiffType = + DR.FirstDecl ? DifferenceSelector(DR.FirstDecl) : EndOfClass; + DR.SecondDiffType = + DR.SecondDecl ? 
DifferenceSelector(DR.SecondDecl) : EndOfClass; + return DR; + } + return DR; +} + +void ODRDiagsEmitter::diagnoseSubMismatchUnexpected( + DiffResult &DR, const NamedDecl *FirstRecord, StringRef FirstModule, + const NamedDecl *SecondRecord, StringRef SecondModule) const { + Diag(FirstRecord->getLocation(), + diag::err_module_odr_violation_different_definitions) + << FirstRecord << FirstModule.empty() << FirstModule; + + if (DR.FirstDecl) { + Diag(DR.FirstDecl->getLocation(), diag::note_first_module_difference) + << FirstRecord << DR.FirstDecl->getSourceRange(); + } + + Diag(SecondRecord->getLocation(), + diag::note_module_odr_violation_different_definitions) + << SecondModule; + + if (DR.SecondDecl) { + Diag(DR.SecondDecl->getLocation(), diag::note_second_module_difference) + << DR.SecondDecl->getSourceRange(); + } +} + +void ODRDiagsEmitter::diagnoseSubMismatchDifferentDeclKinds( + DiffResult &DR, const NamedDecl *FirstRecord, StringRef FirstModule, + const NamedDecl *SecondRecord, StringRef SecondModule) const { + auto GetMismatchedDeclLoc = [](const NamedDecl *Container, + ODRMismatchDecl DiffType, const Decl *D) { + SourceLocation Loc; + SourceRange Range; + if (DiffType == EndOfClass) { + if (auto *Tag = dyn_cast<TagDecl>(Container)) + Loc = Tag->getBraceRange().getEnd(); + else if (auto *IF = dyn_cast<ObjCInterfaceDecl>(Container)) + Loc = IF->getAtEndRange().getBegin(); + else + Loc = Container->getEndLoc(); + } else { + Loc = D->getLocation(); + Range = D->getSourceRange(); + } + return std::make_pair(Loc, Range); + }; + + auto FirstDiagInfo = + GetMismatchedDeclLoc(FirstRecord, DR.FirstDiffType, DR.FirstDecl); + Diag(FirstDiagInfo.first, diag::err_module_odr_violation_mismatch_decl) + << FirstRecord << FirstModule.empty() << FirstModule + << FirstDiagInfo.second << DR.FirstDiffType; + + auto SecondDiagInfo = + GetMismatchedDeclLoc(SecondRecord, DR.SecondDiffType, DR.SecondDecl); + Diag(SecondDiagInfo.first, diag::note_module_odr_violation_mismatch_decl) + << SecondModule.empty() << SecondModule << SecondDiagInfo.second + << DR.SecondDiffType; +} + +bool ODRDiagsEmitter::diagnoseMismatch( + const CXXRecordDecl *FirstRecord, const CXXRecordDecl *SecondRecord, + const struct CXXRecordDecl::DefinitionData *SecondDD) const { + // Multiple different declarations got merged together; tell the user + // where they came from. + if (FirstRecord == SecondRecord) + return false; + + std::string FirstModule = getOwningModuleNameForDiagnostic(FirstRecord); + std::string SecondModule = getOwningModuleNameForDiagnostic(SecondRecord); + + const struct CXXRecordDecl::DefinitionData *FirstDD = + FirstRecord->DefinitionData; + assert(FirstDD && SecondDD && "Definitions without DefinitionData"); + + // Diagnostics from DefinitionData are emitted here. + if (FirstDD != SecondDD) { + // Keep in sync with err_module_odr_violation_definition_data. 
+ enum ODRDefinitionDataDifference { + NumBases, + NumVBases, + BaseType, + BaseVirtual, + BaseAccess, + }; + auto DiagBaseError = [FirstRecord, &FirstModule, + this](SourceLocation Loc, SourceRange Range, + ODRDefinitionDataDifference DiffType) { + return Diag(Loc, diag::err_module_odr_violation_definition_data) + << FirstRecord << FirstModule.empty() << FirstModule << Range + << DiffType; + }; + auto DiagBaseNote = [&SecondModule, + this](SourceLocation Loc, SourceRange Range, + ODRDefinitionDataDifference DiffType) { + return Diag(Loc, diag::note_module_odr_violation_definition_data) + << SecondModule << Range << DiffType; + }; + auto GetSourceRange = [](const struct CXXRecordDecl::DefinitionData *DD) { + unsigned NumBases = DD->NumBases; + if (NumBases == 0) + return SourceRange(); + ArrayRef<CXXBaseSpecifier> bases = DD->bases(); + return SourceRange(bases[0].getBeginLoc(), + bases[NumBases - 1].getEndLoc()); + }; + + unsigned FirstNumBases = FirstDD->NumBases; + unsigned FirstNumVBases = FirstDD->NumVBases; + unsigned SecondNumBases = SecondDD->NumBases; + unsigned SecondNumVBases = SecondDD->NumVBases; + if (FirstNumBases != SecondNumBases) { + DiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD), + NumBases) + << FirstNumBases; + DiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD), + NumBases) + << SecondNumBases; + return true; + } + + if (FirstNumVBases != SecondNumVBases) { + DiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD), + NumVBases) + << FirstNumVBases; + DiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD), + NumVBases) + << SecondNumVBases; + return true; + } + + ArrayRef<CXXBaseSpecifier> FirstBases = FirstDD->bases(); + ArrayRef<CXXBaseSpecifier> SecondBases = SecondDD->bases(); + for (unsigned I = 0; I < FirstNumBases; ++I) { + const CXXBaseSpecifier FirstBase = FirstBases[I]; + const CXXBaseSpecifier SecondBase = SecondBases[I]; + if (computeODRHash(FirstBase.getType()) != + computeODRHash(SecondBase.getType())) { + DiagBaseError(FirstRecord->getLocation(), FirstBase.getSourceRange(), + BaseType) + << (I + 1) << FirstBase.getType(); + DiagBaseNote(SecondRecord->getLocation(), SecondBase.getSourceRange(), + BaseType) + << (I + 1) << SecondBase.getType(); + return true; + } + + if (FirstBase.isVirtual() != SecondBase.isVirtual()) { + DiagBaseError(FirstRecord->getLocation(), FirstBase.getSourceRange(), + BaseVirtual) + << (I + 1) << FirstBase.isVirtual() << FirstBase.getType(); + DiagBaseNote(SecondRecord->getLocation(), SecondBase.getSourceRange(), + BaseVirtual) + << (I + 1) << SecondBase.isVirtual() << SecondBase.getType(); + return true; + } + + if (FirstBase.getAccessSpecifierAsWritten() != + SecondBase.getAccessSpecifierAsWritten()) { + DiagBaseError(FirstRecord->getLocation(), FirstBase.getSourceRange(), + BaseAccess) + << (I + 1) << FirstBase.getType() + << (int)FirstBase.getAccessSpecifierAsWritten(); + DiagBaseNote(SecondRecord->getLocation(), SecondBase.getSourceRange(), + BaseAccess) + << (I + 1) << SecondBase.getType() + << (int)SecondBase.getAccessSpecifierAsWritten(); + return true; + } + } + } + + const ClassTemplateDecl *FirstTemplate = + FirstRecord->getDescribedClassTemplate(); + const ClassTemplateDecl *SecondTemplate = + SecondRecord->getDescribedClassTemplate(); + + assert(!FirstTemplate == !SecondTemplate && + "Both pointers should be null or non-null"); + + if (FirstTemplate && SecondTemplate) { + ArrayRef<const NamedDecl *> FirstTemplateParams = + 
FirstTemplate->getTemplateParameters()->asArray(); + ArrayRef<const NamedDecl *> SecondTemplateParams = + SecondTemplate->getTemplateParameters()->asArray(); + assert(FirstTemplateParams.size() == SecondTemplateParams.size() && + "Number of template parameters should be equal."); + for (auto Pair : llvm::zip(FirstTemplateParams, SecondTemplateParams)) { + const NamedDecl *FirstDecl = std::get<0>(Pair); + const NamedDecl *SecondDecl = std::get<1>(Pair); + if (computeODRHash(FirstDecl) == computeODRHash(SecondDecl)) + continue; + + assert(FirstDecl->getKind() == SecondDecl->getKind() && + "Parameter Decl's should be the same kind."); + + enum ODRTemplateDifference { + ParamEmptyName, + ParamName, + ParamSingleDefaultArgument, + ParamDifferentDefaultArgument, + }; + + auto hasDefaultArg = [](const NamedDecl *D) { + if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) + return TTP->hasDefaultArgument() && + !TTP->defaultArgumentWasInherited(); + if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) + return NTTP->hasDefaultArgument() && + !NTTP->defaultArgumentWasInherited(); + auto *TTP = cast<TemplateTemplateParmDecl>(D); + return TTP->hasDefaultArgument() && !TTP->defaultArgumentWasInherited(); + }; + bool hasFirstArg = hasDefaultArg(FirstDecl); + bool hasSecondArg = hasDefaultArg(SecondDecl); + + ODRTemplateDifference ErrDiffType; + ODRTemplateDifference NoteDiffType; + + DeclarationName FirstName = FirstDecl->getDeclName(); + DeclarationName SecondName = SecondDecl->getDeclName(); + + if (FirstName != SecondName) { + bool FirstNameEmpty = + FirstName.isIdentifier() && !FirstName.getAsIdentifierInfo(); + bool SecondNameEmpty = + SecondName.isIdentifier() && !SecondName.getAsIdentifierInfo(); + ErrDiffType = FirstNameEmpty ? ParamEmptyName : ParamName; + NoteDiffType = SecondNameEmpty ? 
ParamEmptyName : ParamName; + } else if (hasFirstArg == hasSecondArg) + ErrDiffType = NoteDiffType = ParamDifferentDefaultArgument; + else + ErrDiffType = NoteDiffType = ParamSingleDefaultArgument; + + Diag(FirstDecl->getLocation(), + diag::err_module_odr_violation_template_parameter) + << FirstRecord << FirstModule.empty() << FirstModule + << FirstDecl->getSourceRange() << ErrDiffType << hasFirstArg + << FirstName; + Diag(SecondDecl->getLocation(), + diag::note_module_odr_violation_template_parameter) + << SecondModule << SecondDecl->getSourceRange() << NoteDiffType + << hasSecondArg << SecondName; + return true; + } + } + + auto PopulateHashes = [](DeclHashes &Hashes, const RecordDecl *Record, + const DeclContext *DC) { + for (const Decl *D : Record->decls()) { + if (!ODRHash::isSubDeclToBeProcessed(D, DC)) + continue; + Hashes.emplace_back(D, computeODRHash(D)); + } + }; + + DeclHashes FirstHashes; + DeclHashes SecondHashes; + const DeclContext *DC = FirstRecord; + PopulateHashes(FirstHashes, FirstRecord, DC); + PopulateHashes(SecondHashes, SecondRecord, DC); + + DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes); + ODRMismatchDecl FirstDiffType = DR.FirstDiffType; + ODRMismatchDecl SecondDiffType = DR.SecondDiffType; + const Decl *FirstDecl = DR.FirstDecl; + const Decl *SecondDecl = DR.SecondDecl; + + if (FirstDiffType == Other || SecondDiffType == Other) { + diagnoseSubMismatchUnexpected(DR, FirstRecord, FirstModule, SecondRecord, + SecondModule); + return true; + } + + if (FirstDiffType != SecondDiffType) { + diagnoseSubMismatchDifferentDeclKinds(DR, FirstRecord, FirstModule, + SecondRecord, SecondModule); + return true; + } + + // Used with err_module_odr_violation_record and + // note_module_odr_violation_record + enum ODRCXXRecordDifference { + StaticAssertCondition, + StaticAssertMessage, + StaticAssertOnlyMessage, + MethodName, + MethodDeleted, + MethodDefaulted, + MethodVirtual, + MethodStatic, + MethodVolatile, + MethodConst, + MethodInline, + MethodParameterSingleDefaultArgument, + MethodParameterDifferentDefaultArgument, + MethodNoTemplateArguments, + MethodDifferentNumberTemplateArguments, + MethodDifferentTemplateArgument, + MethodSingleBody, + MethodDifferentBody, + FriendTypeFunction, + FriendType, + FriendFunction, + FunctionTemplateDifferentNumberParameters, + FunctionTemplateParameterDifferentKind, + FunctionTemplateParameterName, + FunctionTemplateParameterSingleDefaultArgument, + FunctionTemplateParameterDifferentDefaultArgument, + FunctionTemplateParameterDifferentType, + FunctionTemplatePackParameter, + }; + auto DiagError = [FirstRecord, &FirstModule, + this](SourceLocation Loc, SourceRange Range, + ODRCXXRecordDifference DiffType) { + return Diag(Loc, diag::err_module_odr_violation_record) + << FirstRecord << FirstModule.empty() << FirstModule << Range + << DiffType; + }; + auto DiagNote = [&SecondModule, this](SourceLocation Loc, SourceRange Range, + ODRCXXRecordDifference DiffType) { + return Diag(Loc, diag::note_module_odr_violation_record) + << SecondModule << Range << DiffType; + }; + + assert(FirstDiffType == SecondDiffType); + switch (FirstDiffType) { + case Other: + case EndOfClass: + case PublicSpecifer: + case PrivateSpecifer: + case ProtectedSpecifer: + case ObjCMethod: + case ObjCIvar: + case ObjCProperty: + llvm_unreachable("Invalid diff type"); + + case StaticAssert: { + const StaticAssertDecl *FirstSA = cast<StaticAssertDecl>(FirstDecl); + const StaticAssertDecl *SecondSA = cast<StaticAssertDecl>(SecondDecl); + + const Expr *FirstExpr = 
FirstSA->getAssertExpr(); + const Expr *SecondExpr = SecondSA->getAssertExpr(); + unsigned FirstODRHash = computeODRHash(FirstExpr); + unsigned SecondODRHash = computeODRHash(SecondExpr); + if (FirstODRHash != SecondODRHash) { + DiagError(FirstExpr->getBeginLoc(), FirstExpr->getSourceRange(), + StaticAssertCondition); + DiagNote(SecondExpr->getBeginLoc(), SecondExpr->getSourceRange(), + StaticAssertCondition); + return true; + } + + const Expr *FirstMessage = FirstSA->getMessage(); + const Expr *SecondMessage = SecondSA->getMessage(); + assert((FirstMessage || SecondMessage) && "Both messages cannot be empty"); + if ((FirstMessage && !SecondMessage) || (!FirstMessage && SecondMessage)) { + SourceLocation FirstLoc, SecondLoc; + SourceRange FirstRange, SecondRange; + if (FirstMessage) { + FirstLoc = FirstMessage->getBeginLoc(); + FirstRange = FirstMessage->getSourceRange(); + } else { + FirstLoc = FirstSA->getBeginLoc(); + FirstRange = FirstSA->getSourceRange(); + } + if (SecondMessage) { + SecondLoc = SecondMessage->getBeginLoc(); + SecondRange = SecondMessage->getSourceRange(); + } else { + SecondLoc = SecondSA->getBeginLoc(); + SecondRange = SecondSA->getSourceRange(); + } + DiagError(FirstLoc, FirstRange, StaticAssertOnlyMessage) + << (FirstMessage == nullptr); + DiagNote(SecondLoc, SecondRange, StaticAssertOnlyMessage) + << (SecondMessage == nullptr); + return true; + } + + if (FirstMessage && SecondMessage) { + unsigned FirstMessageODRHash = computeODRHash(FirstMessage); + unsigned SecondMessageODRHash = computeODRHash(SecondMessage); + if (FirstMessageODRHash != SecondMessageODRHash) { + DiagError(FirstMessage->getBeginLoc(), FirstMessage->getSourceRange(), + StaticAssertMessage); + DiagNote(SecondMessage->getBeginLoc(), SecondMessage->getSourceRange(), + StaticAssertMessage); + return true; + } + } + break; + } + + case Field: { + if (diagnoseSubMismatchField(FirstRecord, FirstModule, SecondModule, + cast<FieldDecl>(FirstDecl), + cast<FieldDecl>(SecondDecl))) + return true; + break; + } + + case CXXMethod: { + enum { + DiagMethod, + DiagConstructor, + DiagDestructor, + } FirstMethodType, + SecondMethodType; + auto GetMethodTypeForDiagnostics = [](const CXXMethodDecl *D) { + if (isa<CXXConstructorDecl>(D)) + return DiagConstructor; + if (isa<CXXDestructorDecl>(D)) + return DiagDestructor; + return DiagMethod; + }; + const CXXMethodDecl *FirstMethod = cast<CXXMethodDecl>(FirstDecl); + const CXXMethodDecl *SecondMethod = cast<CXXMethodDecl>(SecondDecl); + FirstMethodType = GetMethodTypeForDiagnostics(FirstMethod); + SecondMethodType = GetMethodTypeForDiagnostics(SecondMethod); + DeclarationName FirstName = FirstMethod->getDeclName(); + DeclarationName SecondName = SecondMethod->getDeclName(); + auto DiagMethodError = [&DiagError, FirstMethod, FirstMethodType, + FirstName](ODRCXXRecordDifference DiffType) { + return DiagError(FirstMethod->getLocation(), + FirstMethod->getSourceRange(), DiffType) + << FirstMethodType << FirstName; + }; + auto DiagMethodNote = [&DiagNote, SecondMethod, SecondMethodType, + SecondName](ODRCXXRecordDifference DiffType) { + return DiagNote(SecondMethod->getLocation(), + SecondMethod->getSourceRange(), DiffType) + << SecondMethodType << SecondName; + }; + + if (FirstMethodType != SecondMethodType || FirstName != SecondName) { + DiagMethodError(MethodName); + DiagMethodNote(MethodName); + return true; + } + + const bool FirstDeleted = FirstMethod->isDeletedAsWritten(); + const bool SecondDeleted = SecondMethod->isDeletedAsWritten(); + if (FirstDeleted != 
SecondDeleted) { + DiagMethodError(MethodDeleted) << FirstDeleted; + DiagMethodNote(MethodDeleted) << SecondDeleted; + return true; + } + + const bool FirstDefaulted = FirstMethod->isExplicitlyDefaulted(); + const bool SecondDefaulted = SecondMethod->isExplicitlyDefaulted(); + if (FirstDefaulted != SecondDefaulted) { + DiagMethodError(MethodDefaulted) << FirstDefaulted; + DiagMethodNote(MethodDefaulted) << SecondDefaulted; + return true; + } + + const bool FirstVirtual = FirstMethod->isVirtualAsWritten(); + const bool SecondVirtual = SecondMethod->isVirtualAsWritten(); + const bool FirstPure = FirstMethod->isPureVirtual(); + const bool SecondPure = SecondMethod->isPureVirtual(); + if ((FirstVirtual || SecondVirtual) && + (FirstVirtual != SecondVirtual || FirstPure != SecondPure)) { + DiagMethodError(MethodVirtual) << FirstPure << FirstVirtual; + DiagMethodNote(MethodVirtual) << SecondPure << SecondVirtual; + return true; + } + + // CXXMethodDecl::isStatic uses the canonical Decl. With Decl merging, + // FirstDecl is the canonical Decl of SecondDecl, so the storage + // class needs to be checked instead. + StorageClass FirstStorage = FirstMethod->getStorageClass(); + StorageClass SecondStorage = SecondMethod->getStorageClass(); + const bool FirstStatic = FirstStorage == SC_Static; + const bool SecondStatic = SecondStorage == SC_Static; + if (FirstStatic != SecondStatic) { + DiagMethodError(MethodStatic) << FirstStatic; + DiagMethodNote(MethodStatic) << SecondStatic; + return true; + } + + const bool FirstVolatile = FirstMethod->isVolatile(); + const bool SecondVolatile = SecondMethod->isVolatile(); + if (FirstVolatile != SecondVolatile) { + DiagMethodError(MethodVolatile) << FirstVolatile; + DiagMethodNote(MethodVolatile) << SecondVolatile; + return true; + } + + const bool FirstConst = FirstMethod->isConst(); + const bool SecondConst = SecondMethod->isConst(); + if (FirstConst != SecondConst) { + DiagMethodError(MethodConst) << FirstConst; + DiagMethodNote(MethodConst) << SecondConst; + return true; + } + + const bool FirstInline = FirstMethod->isInlineSpecified(); + const bool SecondInline = SecondMethod->isInlineSpecified(); + if (FirstInline != SecondInline) { + DiagMethodError(MethodInline) << FirstInline; + DiagMethodNote(MethodInline) << SecondInline; + return true; + } + + if (diagnoseSubMismatchMethodParameters(Diags, FirstRecord, + FirstModule, SecondModule, + FirstMethod, SecondMethod)) + return true; + + for (unsigned I = 0, N = FirstMethod->param_size(); I < N; ++I) { + const ParmVarDecl *FirstParam = FirstMethod->getParamDecl(I); + const ParmVarDecl *SecondParam = SecondMethod->getParamDecl(I); + + const Expr *FirstInit = FirstParam->getInit(); + const Expr *SecondInit = SecondParam->getInit(); + if ((FirstInit == nullptr) != (SecondInit == nullptr)) { + DiagMethodError(MethodParameterSingleDefaultArgument) + << (I + 1) << (FirstInit == nullptr) + << (FirstInit ? FirstInit->getSourceRange() : SourceRange()); + DiagMethodNote(MethodParameterSingleDefaultArgument) + << (I + 1) << (SecondInit == nullptr) + << (SecondInit ? 
SecondInit->getSourceRange() : SourceRange()); + return true; + } + + if (FirstInit && SecondInit && + computeODRHash(FirstInit) != computeODRHash(SecondInit)) { + DiagMethodError(MethodParameterDifferentDefaultArgument) + << (I + 1) << FirstInit->getSourceRange(); + DiagMethodNote(MethodParameterDifferentDefaultArgument) + << (I + 1) << SecondInit->getSourceRange(); + return true; + } + } + + const TemplateArgumentList *FirstTemplateArgs = + FirstMethod->getTemplateSpecializationArgs(); + const TemplateArgumentList *SecondTemplateArgs = + SecondMethod->getTemplateSpecializationArgs(); + + if ((FirstTemplateArgs && !SecondTemplateArgs) || + (!FirstTemplateArgs && SecondTemplateArgs)) { + DiagMethodError(MethodNoTemplateArguments) + << (FirstTemplateArgs != nullptr); + DiagMethodNote(MethodNoTemplateArguments) + << (SecondTemplateArgs != nullptr); + return true; + } + + if (FirstTemplateArgs && SecondTemplateArgs) { + // Remove pack expansions from argument list. + auto ExpandTemplateArgumentList = [](const TemplateArgumentList *TAL) { + llvm::SmallVector<const TemplateArgument *, 8> ExpandedList; + for (const TemplateArgument &TA : TAL->asArray()) { + if (TA.getKind() != TemplateArgument::Pack) { + ExpandedList.push_back(&TA); + continue; + } + llvm::append_range(ExpandedList, + llvm::make_pointer_range(TA.getPackAsArray())); + } + return ExpandedList; + }; + llvm::SmallVector<const TemplateArgument *, 8> FirstExpandedList = + ExpandTemplateArgumentList(FirstTemplateArgs); + llvm::SmallVector<const TemplateArgument *, 8> SecondExpandedList = + ExpandTemplateArgumentList(SecondTemplateArgs); + + if (FirstExpandedList.size() != SecondExpandedList.size()) { + DiagMethodError(MethodDifferentNumberTemplateArguments) + << (unsigned)FirstExpandedList.size(); + DiagMethodNote(MethodDifferentNumberTemplateArguments) + << (unsigned)SecondExpandedList.size(); + return true; + } + + for (unsigned i = 0, e = FirstExpandedList.size(); i != e; ++i) { + const TemplateArgument &FirstTA = *FirstExpandedList[i], + &SecondTA = *SecondExpandedList[i]; + if (computeODRHash(FirstTA) == computeODRHash(SecondTA)) + continue; + + DiagMethodError(MethodDifferentTemplateArgument) << FirstTA << i + 1; + DiagMethodNote(MethodDifferentTemplateArgument) << SecondTA << i + 1; + return true; + } + } + + // Compute the hash of the method as if it has no body. + auto ComputeCXXMethodODRHash = [](const CXXMethodDecl *D) { + ODRHash Hasher; + Hasher.AddFunctionDecl(D, true /*SkipBody*/); + return Hasher.CalculateHash(); + }; + + // Compare the hash generated to the hash stored. A difference means + // that a body was present in the original source. Due to merging, + // the standard way of detecting a body will not work. 
+ const bool HasFirstBody = + ComputeCXXMethodODRHash(FirstMethod) != FirstMethod->getODRHash(); + const bool HasSecondBody = + ComputeCXXMethodODRHash(SecondMethod) != SecondMethod->getODRHash(); + + if (HasFirstBody != HasSecondBody) { + DiagMethodError(MethodSingleBody) << HasFirstBody; + DiagMethodNote(MethodSingleBody) << HasSecondBody; + return true; + } + + if (HasFirstBody && HasSecondBody) { + DiagMethodError(MethodDifferentBody); + DiagMethodNote(MethodDifferentBody); + return true; + } + + break; + } + + case TypeAlias: + case TypeDef: { + if (diagnoseSubMismatchTypedef(FirstRecord, FirstModule, SecondModule, + cast<TypedefNameDecl>(FirstDecl), + cast<TypedefNameDecl>(SecondDecl), + FirstDiffType == TypeAlias)) + return true; + break; + } + case Var: { + if (diagnoseSubMismatchVar(FirstRecord, FirstModule, SecondModule, + cast<VarDecl>(FirstDecl), + cast<VarDecl>(SecondDecl))) + return true; + break; + } + case Friend: { + const FriendDecl *FirstFriend = cast<FriendDecl>(FirstDecl); + const FriendDecl *SecondFriend = cast<FriendDecl>(SecondDecl); + + const NamedDecl *FirstND = FirstFriend->getFriendDecl(); + const NamedDecl *SecondND = SecondFriend->getFriendDecl(); + + TypeSourceInfo *FirstTSI = FirstFriend->getFriendType(); + TypeSourceInfo *SecondTSI = SecondFriend->getFriendType(); + + if (FirstND && SecondND) { + DiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(), + FriendFunction) + << FirstND; + DiagNote(SecondFriend->getFriendLoc(), SecondFriend->getSourceRange(), + FriendFunction) + << SecondND; + return true; + } + + if (FirstTSI && SecondTSI) { + QualType FirstFriendType = FirstTSI->getType(); + QualType SecondFriendType = SecondTSI->getType(); + assert(computeODRHash(FirstFriendType) != + computeODRHash(SecondFriendType)); + DiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(), + FriendType) + << FirstFriendType; + DiagNote(SecondFriend->getFriendLoc(), SecondFriend->getSourceRange(), + FriendType) + << SecondFriendType; + return true; + } + + DiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(), + FriendTypeFunction) + << (FirstTSI == nullptr); + DiagNote(SecondFriend->getFriendLoc(), SecondFriend->getSourceRange(), + FriendTypeFunction) + << (SecondTSI == nullptr); + return true; + } + case FunctionTemplate: { + const FunctionTemplateDecl *FirstTemplate = + cast<FunctionTemplateDecl>(FirstDecl); + const FunctionTemplateDecl *SecondTemplate = + cast<FunctionTemplateDecl>(SecondDecl); + + TemplateParameterList *FirstTPL = FirstTemplate->getTemplateParameters(); + TemplateParameterList *SecondTPL = SecondTemplate->getTemplateParameters(); + + auto DiagTemplateError = [&DiagError, + FirstTemplate](ODRCXXRecordDifference DiffType) { + return DiagError(FirstTemplate->getLocation(), + FirstTemplate->getSourceRange(), DiffType) + << FirstTemplate; + }; + auto DiagTemplateNote = [&DiagNote, + SecondTemplate](ODRCXXRecordDifference DiffType) { + return DiagNote(SecondTemplate->getLocation(), + SecondTemplate->getSourceRange(), DiffType) + << SecondTemplate; + }; + + if (FirstTPL->size() != SecondTPL->size()) { + DiagTemplateError(FunctionTemplateDifferentNumberParameters) + << FirstTPL->size(); + DiagTemplateNote(FunctionTemplateDifferentNumberParameters) + << SecondTPL->size(); + return true; + } + + for (unsigned i = 0, e = FirstTPL->size(); i != e; ++i) { + NamedDecl *FirstParam = FirstTPL->getParam(i); + NamedDecl *SecondParam = SecondTPL->getParam(i); + + if (FirstParam->getKind() != SecondParam->getKind()) { + enum { + 
TemplateTypeParameter, + NonTypeTemplateParameter, + TemplateTemplateParameter, + }; + auto GetParamType = [](NamedDecl *D) { + switch (D->getKind()) { + default: + llvm_unreachable("Unexpected template parameter type"); + case Decl::TemplateTypeParm: + return TemplateTypeParameter; + case Decl::NonTypeTemplateParm: + return NonTypeTemplateParameter; + case Decl::TemplateTemplateParm: + return TemplateTemplateParameter; + } + }; + + DiagTemplateError(FunctionTemplateParameterDifferentKind) + << (i + 1) << GetParamType(FirstParam); + DiagTemplateNote(FunctionTemplateParameterDifferentKind) + << (i + 1) << GetParamType(SecondParam); + return true; + } + + if (FirstParam->getName() != SecondParam->getName()) { + DiagTemplateError(FunctionTemplateParameterName) + << (i + 1) << (bool)FirstParam->getIdentifier() << FirstParam; + DiagTemplateNote(FunctionTemplateParameterName) + << (i + 1) << (bool)SecondParam->getIdentifier() << SecondParam; + return true; + } + + if (isa<TemplateTypeParmDecl>(FirstParam) && + isa<TemplateTypeParmDecl>(SecondParam)) { + TemplateTypeParmDecl *FirstTTPD = + cast<TemplateTypeParmDecl>(FirstParam); + TemplateTypeParmDecl *SecondTTPD = + cast<TemplateTypeParmDecl>(SecondParam); + bool HasFirstDefaultArgument = + FirstTTPD->hasDefaultArgument() && + !FirstTTPD->defaultArgumentWasInherited(); + bool HasSecondDefaultArgument = + SecondTTPD->hasDefaultArgument() && + !SecondTTPD->defaultArgumentWasInherited(); + if (HasFirstDefaultArgument != HasSecondDefaultArgument) { + DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument) + << (i + 1) << HasFirstDefaultArgument; + DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument) + << (i + 1) << HasSecondDefaultArgument; + return true; + } + + if (HasFirstDefaultArgument && HasSecondDefaultArgument) { + TemplateArgument FirstTA = + FirstTTPD->getDefaultArgument().getArgument(); + TemplateArgument SecondTA = + SecondTTPD->getDefaultArgument().getArgument(); + if (computeODRHash(FirstTA) != computeODRHash(SecondTA)) { + DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument) + << (i + 1) << FirstTA; + DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument) + << (i + 1) << SecondTA; + return true; + } + } + + if (FirstTTPD->isParameterPack() != SecondTTPD->isParameterPack()) { + DiagTemplateError(FunctionTemplatePackParameter) + << (i + 1) << FirstTTPD->isParameterPack(); + DiagTemplateNote(FunctionTemplatePackParameter) + << (i + 1) << SecondTTPD->isParameterPack(); + return true; + } + } + + if (isa<TemplateTemplateParmDecl>(FirstParam) && + isa<TemplateTemplateParmDecl>(SecondParam)) { + TemplateTemplateParmDecl *FirstTTPD = + cast<TemplateTemplateParmDecl>(FirstParam); + TemplateTemplateParmDecl *SecondTTPD = + cast<TemplateTemplateParmDecl>(SecondParam); + + TemplateParameterList *FirstTPL = FirstTTPD->getTemplateParameters(); + TemplateParameterList *SecondTPL = SecondTTPD->getTemplateParameters(); + + auto ComputeTemplateParameterListODRHash = + [](const TemplateParameterList *TPL) { + assert(TPL); + ODRHash Hasher; + Hasher.AddTemplateParameterList(TPL); + return Hasher.CalculateHash(); + }; + + if (ComputeTemplateParameterListODRHash(FirstTPL) != + ComputeTemplateParameterListODRHash(SecondTPL)) { + DiagTemplateError(FunctionTemplateParameterDifferentType) << (i + 1); + DiagTemplateNote(FunctionTemplateParameterDifferentType) << (i + 1); + return true; + } + + bool HasFirstDefaultArgument = + FirstTTPD->hasDefaultArgument() && + !FirstTTPD->defaultArgumentWasInherited(); + bool 
HasSecondDefaultArgument = + SecondTTPD->hasDefaultArgument() && + !SecondTTPD->defaultArgumentWasInherited(); + if (HasFirstDefaultArgument != HasSecondDefaultArgument) { + DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument) + << (i + 1) << HasFirstDefaultArgument; + DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument) + << (i + 1) << HasSecondDefaultArgument; + return true; + } + + if (HasFirstDefaultArgument && HasSecondDefaultArgument) { + TemplateArgument FirstTA = + FirstTTPD->getDefaultArgument().getArgument(); + TemplateArgument SecondTA = + SecondTTPD->getDefaultArgument().getArgument(); + if (computeODRHash(FirstTA) != computeODRHash(SecondTA)) { + DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument) + << (i + 1) << FirstTA; + DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument) + << (i + 1) << SecondTA; + return true; + } + } + + if (FirstTTPD->isParameterPack() != SecondTTPD->isParameterPack()) { + DiagTemplateError(FunctionTemplatePackParameter) + << (i + 1) << FirstTTPD->isParameterPack(); + DiagTemplateNote(FunctionTemplatePackParameter) + << (i + 1) << SecondTTPD->isParameterPack(); + return true; + } + } + + if (isa<NonTypeTemplateParmDecl>(FirstParam) && + isa<NonTypeTemplateParmDecl>(SecondParam)) { + NonTypeTemplateParmDecl *FirstNTTPD = + cast<NonTypeTemplateParmDecl>(FirstParam); + NonTypeTemplateParmDecl *SecondNTTPD = + cast<NonTypeTemplateParmDecl>(SecondParam); + + QualType FirstType = FirstNTTPD->getType(); + QualType SecondType = SecondNTTPD->getType(); + if (computeODRHash(FirstType) != computeODRHash(SecondType)) { + DiagTemplateError(FunctionTemplateParameterDifferentType) << (i + 1); + DiagTemplateNote(FunctionTemplateParameterDifferentType) << (i + 1); + return true; + } + + bool HasFirstDefaultArgument = + FirstNTTPD->hasDefaultArgument() && + !FirstNTTPD->defaultArgumentWasInherited(); + bool HasSecondDefaultArgument = + SecondNTTPD->hasDefaultArgument() && + !SecondNTTPD->defaultArgumentWasInherited(); + if (HasFirstDefaultArgument != HasSecondDefaultArgument) { + DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument) + << (i + 1) << HasFirstDefaultArgument; + DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument) + << (i + 1) << HasSecondDefaultArgument; + return true; + } + + if (HasFirstDefaultArgument && HasSecondDefaultArgument) { + TemplateArgument FirstDefaultArgument = + FirstNTTPD->getDefaultArgument().getArgument(); + TemplateArgument SecondDefaultArgument = + SecondNTTPD->getDefaultArgument().getArgument(); + + if (computeODRHash(FirstDefaultArgument) != + computeODRHash(SecondDefaultArgument)) { + DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument) + << (i + 1) << FirstDefaultArgument; + DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument) + << (i + 1) << SecondDefaultArgument; + return true; + } + } + + if (FirstNTTPD->isParameterPack() != SecondNTTPD->isParameterPack()) { + DiagTemplateError(FunctionTemplatePackParameter) + << (i + 1) << FirstNTTPD->isParameterPack(); + DiagTemplateNote(FunctionTemplatePackParameter) + << (i + 1) << SecondNTTPD->isParameterPack(); + return true; + } + } + } + break; + } + } + + Diag(FirstDecl->getLocation(), + diag::err_module_odr_violation_mismatch_decl_unknown) + << FirstRecord << FirstModule.empty() << FirstModule << FirstDiffType + << FirstDecl->getSourceRange(); + Diag(SecondDecl->getLocation(), + diag::note_module_odr_violation_mismatch_decl_unknown) + << SecondModule.empty() << SecondModule 
<< FirstDiffType + << SecondDecl->getSourceRange(); + return true; +} + +bool ODRDiagsEmitter::diagnoseMismatch(const RecordDecl *FirstRecord, + const RecordDecl *SecondRecord) const { + if (FirstRecord == SecondRecord) + return false; + + std::string FirstModule = getOwningModuleNameForDiagnostic(FirstRecord); + std::string SecondModule = getOwningModuleNameForDiagnostic(SecondRecord); + + auto PopulateHashes = [](DeclHashes &Hashes, const RecordDecl *Record, + const DeclContext *DC) { + for (const Decl *D : Record->decls()) { + if (!ODRHash::isSubDeclToBeProcessed(D, DC)) + continue; + Hashes.emplace_back(D, computeODRHash(D)); + } + }; + + DeclHashes FirstHashes; + DeclHashes SecondHashes; + const DeclContext *DC = FirstRecord; + PopulateHashes(FirstHashes, FirstRecord, DC); + PopulateHashes(SecondHashes, SecondRecord, DC); + + DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes); + ODRMismatchDecl FirstDiffType = DR.FirstDiffType; + ODRMismatchDecl SecondDiffType = DR.SecondDiffType; + const Decl *FirstDecl = DR.FirstDecl; + const Decl *SecondDecl = DR.SecondDecl; + + if (FirstDiffType == Other || SecondDiffType == Other) { + diagnoseSubMismatchUnexpected(DR, FirstRecord, FirstModule, SecondRecord, + SecondModule); + return true; + } + + if (FirstDiffType != SecondDiffType) { + diagnoseSubMismatchDifferentDeclKinds(DR, FirstRecord, FirstModule, + SecondRecord, SecondModule); + return true; + } + + assert(FirstDiffType == SecondDiffType); + switch (FirstDiffType) { + // Already handled. + case EndOfClass: + case Other: + // C++ only, invalid in this context. + case PublicSpecifer: + case PrivateSpecifer: + case ProtectedSpecifer: + case StaticAssert: + case CXXMethod: + case TypeAlias: + case Friend: + case FunctionTemplate: + // Cannot be contained by RecordDecl, invalid in this context. + case ObjCMethod: + case ObjCIvar: + case ObjCProperty: + llvm_unreachable("Invalid diff type"); + + case Field: { + if (diagnoseSubMismatchField(FirstRecord, FirstModule, SecondModule, + cast<FieldDecl>(FirstDecl), + cast<FieldDecl>(SecondDecl))) + return true; + break; + } + case TypeDef: { + if (diagnoseSubMismatchTypedef(FirstRecord, FirstModule, SecondModule, + cast<TypedefNameDecl>(FirstDecl), + cast<TypedefNameDecl>(SecondDecl), + /*IsTypeAlias=*/false)) + return true; + break; + } + case Var: { + if (diagnoseSubMismatchVar(FirstRecord, FirstModule, SecondModule, + cast<VarDecl>(FirstDecl), + cast<VarDecl>(SecondDecl))) + return true; + break; + } + } + + Diag(FirstDecl->getLocation(), + diag::err_module_odr_violation_mismatch_decl_unknown) + << FirstRecord << FirstModule.empty() << FirstModule << FirstDiffType + << FirstDecl->getSourceRange(); + Diag(SecondDecl->getLocation(), + diag::note_module_odr_violation_mismatch_decl_unknown) + << SecondModule.empty() << SecondModule << FirstDiffType + << SecondDecl->getSourceRange(); + return true; +} + +bool ODRDiagsEmitter::diagnoseMismatch( + const FunctionDecl *FirstFunction, + const FunctionDecl *SecondFunction) const { + if (FirstFunction == SecondFunction) + return false; + + // Keep in sync with select options in err_module_odr_violation_function. 
+ enum ODRFunctionDifference { + ReturnType, + ParameterName, + ParameterType, + ParameterSingleDefaultArgument, + ParameterDifferentDefaultArgument, + FunctionBody, + }; + + std::string FirstModule = getOwningModuleNameForDiagnostic(FirstFunction); + std::string SecondModule = getOwningModuleNameForDiagnostic(SecondFunction); + + auto DiagError = [FirstFunction, &FirstModule, + this](SourceLocation Loc, SourceRange Range, + ODRFunctionDifference DiffType) { + return Diag(Loc, diag::err_module_odr_violation_function) + << FirstFunction << FirstModule.empty() << FirstModule << Range + << DiffType; + }; + auto DiagNote = [&SecondModule, this](SourceLocation Loc, SourceRange Range, + ODRFunctionDifference DiffType) { + return Diag(Loc, diag::note_module_odr_violation_function) + << SecondModule << Range << DiffType; + }; + + if (computeODRHash(FirstFunction->getReturnType()) != + computeODRHash(SecondFunction->getReturnType())) { + DiagError(FirstFunction->getReturnTypeSourceRange().getBegin(), + FirstFunction->getReturnTypeSourceRange(), ReturnType) + << FirstFunction->getReturnType(); + DiagNote(SecondFunction->getReturnTypeSourceRange().getBegin(), + SecondFunction->getReturnTypeSourceRange(), ReturnType) + << SecondFunction->getReturnType(); + return true; + } + + assert(FirstFunction->param_size() == SecondFunction->param_size() && + "Merged functions with different number of parameters"); + + size_t ParamSize = FirstFunction->param_size(); + for (unsigned I = 0; I < ParamSize; ++I) { + const ParmVarDecl *FirstParam = FirstFunction->getParamDecl(I); + const ParmVarDecl *SecondParam = SecondFunction->getParamDecl(I); + + assert(Context.hasSameType(FirstParam->getType(), SecondParam->getType()) && + "Merged function has different parameter types."); + + if (FirstParam->getDeclName() != SecondParam->getDeclName()) { + DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(), + ParameterName) + << I + 1 << FirstParam->getDeclName(); + DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(), + ParameterName) + << I + 1 << SecondParam->getDeclName(); + return true; + }; + + QualType FirstParamType = FirstParam->getType(); + QualType SecondParamType = SecondParam->getType(); + if (FirstParamType != SecondParamType && + computeODRHash(FirstParamType) != computeODRHash(SecondParamType)) { + if (const DecayedType *ParamDecayedType = + FirstParamType->getAs<DecayedType>()) { + DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(), + ParameterType) + << (I + 1) << FirstParamType << true + << ParamDecayedType->getOriginalType(); + } else { + DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(), + ParameterType) + << (I + 1) << FirstParamType << false; + } + + if (const DecayedType *ParamDecayedType = + SecondParamType->getAs<DecayedType>()) { + DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(), + ParameterType) + << (I + 1) << SecondParamType << true + << ParamDecayedType->getOriginalType(); + } else { + DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(), + ParameterType) + << (I + 1) << SecondParamType << false; + } + return true; + } + + // Note, these calls can trigger deserialization. + const Expr *FirstInit = FirstParam->getInit(); + const Expr *SecondInit = SecondParam->getInit(); + if ((FirstInit == nullptr) != (SecondInit == nullptr)) { + DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(), + ParameterSingleDefaultArgument) + << (I + 1) << (FirstInit == nullptr) + << (FirstInit ? 
FirstInit->getSourceRange() : SourceRange()); + DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(), + ParameterSingleDefaultArgument) + << (I + 1) << (SecondInit == nullptr) + << (SecondInit ? SecondInit->getSourceRange() : SourceRange()); + return true; + } + + if (FirstInit && SecondInit && + computeODRHash(FirstInit) != computeODRHash(SecondInit)) { + DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(), + ParameterDifferentDefaultArgument) + << (I + 1) << FirstInit->getSourceRange(); + DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(), + ParameterDifferentDefaultArgument) + << (I + 1) << SecondInit->getSourceRange(); + return true; + } + + assert(computeODRHash(FirstParam) == computeODRHash(SecondParam) && + "Undiagnosed parameter difference."); + } + + // If no error has been generated before now, assume the problem is in + // the body and generate a message. + DiagError(FirstFunction->getLocation(), FirstFunction->getSourceRange(), + FunctionBody); + DiagNote(SecondFunction->getLocation(), SecondFunction->getSourceRange(), + FunctionBody); + return true; +} + +bool ODRDiagsEmitter::diagnoseMismatch(const EnumDecl *FirstEnum, + const EnumDecl *SecondEnum) const { + if (FirstEnum == SecondEnum) + return false; + + // Keep in sync with select options in err_module_odr_violation_enum. + enum ODREnumDifference { + SingleScopedEnum, + EnumTagKeywordMismatch, + SingleSpecifiedType, + DifferentSpecifiedTypes, + DifferentNumberEnumConstants, + EnumConstantName, + EnumConstantSingleInitializer, + EnumConstantDifferentInitializer, + }; + + std::string FirstModule = getOwningModuleNameForDiagnostic(FirstEnum); + std::string SecondModule = getOwningModuleNameForDiagnostic(SecondEnum); + + auto DiagError = [FirstEnum, &FirstModule, this](const auto *DiagAnchor, + ODREnumDifference DiffType) { + return Diag(DiagAnchor->getLocation(), diag::err_module_odr_violation_enum) + << FirstEnum << FirstModule.empty() << FirstModule + << DiagAnchor->getSourceRange() << DiffType; + }; + auto DiagNote = [&SecondModule, this](const auto *DiagAnchor, + ODREnumDifference DiffType) { + return Diag(DiagAnchor->getLocation(), diag::note_module_odr_violation_enum) + << SecondModule << DiagAnchor->getSourceRange() << DiffType; + }; + + if (FirstEnum->isScoped() != SecondEnum->isScoped()) { + DiagError(FirstEnum, SingleScopedEnum) << FirstEnum->isScoped(); + DiagNote(SecondEnum, SingleScopedEnum) << SecondEnum->isScoped(); + return true; + } + + if (FirstEnum->isScoped() && SecondEnum->isScoped()) { + if (FirstEnum->isScopedUsingClassTag() != + SecondEnum->isScopedUsingClassTag()) { + DiagError(FirstEnum, EnumTagKeywordMismatch) + << FirstEnum->isScopedUsingClassTag(); + DiagNote(SecondEnum, EnumTagKeywordMismatch) + << SecondEnum->isScopedUsingClassTag(); + return true; + } + } + + QualType FirstUnderlyingType = + FirstEnum->getIntegerTypeSourceInfo() + ? FirstEnum->getIntegerTypeSourceInfo()->getType() + : QualType(); + QualType SecondUnderlyingType = + SecondEnum->getIntegerTypeSourceInfo() + ? 
SecondEnum->getIntegerTypeSourceInfo()->getType() + : QualType(); + if (FirstUnderlyingType.isNull() != SecondUnderlyingType.isNull()) { + DiagError(FirstEnum, SingleSpecifiedType) << !FirstUnderlyingType.isNull(); + DiagNote(SecondEnum, SingleSpecifiedType) << !SecondUnderlyingType.isNull(); + return true; + } + + if (!FirstUnderlyingType.isNull() && !SecondUnderlyingType.isNull()) { + if (computeODRHash(FirstUnderlyingType) != + computeODRHash(SecondUnderlyingType)) { + DiagError(FirstEnum, DifferentSpecifiedTypes) << FirstUnderlyingType; + DiagNote(SecondEnum, DifferentSpecifiedTypes) << SecondUnderlyingType; + return true; + } + } + + // Compare enum constants. + using DeclHashes = + llvm::SmallVector<std::pair<const EnumConstantDecl *, unsigned>, 4>; + auto PopulateHashes = [FirstEnum](DeclHashes &Hashes, const EnumDecl *Enum) { + for (const Decl *D : Enum->decls()) { + // Due to decl merging, the first EnumDecl is the parent of + // Decls in both records. + if (!ODRHash::isSubDeclToBeProcessed(D, FirstEnum)) + continue; + assert(isa<EnumConstantDecl>(D) && "Unexpected Decl kind"); + Hashes.emplace_back(cast<EnumConstantDecl>(D), computeODRHash(D)); + } + }; + DeclHashes FirstHashes; + PopulateHashes(FirstHashes, FirstEnum); + DeclHashes SecondHashes; + PopulateHashes(SecondHashes, SecondEnum); + + if (FirstHashes.size() != SecondHashes.size()) { + DiagError(FirstEnum, DifferentNumberEnumConstants) + << (int)FirstHashes.size(); + DiagNote(SecondEnum, DifferentNumberEnumConstants) + << (int)SecondHashes.size(); + return true; + } + + for (unsigned I = 0, N = FirstHashes.size(); I < N; ++I) { + if (FirstHashes[I].second == SecondHashes[I].second) + continue; + const EnumConstantDecl *FirstConstant = FirstHashes[I].first; + const EnumConstantDecl *SecondConstant = SecondHashes[I].first; + + if (FirstConstant->getDeclName() != SecondConstant->getDeclName()) { + DiagError(FirstConstant, EnumConstantName) << I + 1 << FirstConstant; + DiagNote(SecondConstant, EnumConstantName) << I + 1 << SecondConstant; + return true; + } + + const Expr *FirstInit = FirstConstant->getInitExpr(); + const Expr *SecondInit = SecondConstant->getInitExpr(); + if (!FirstInit && !SecondInit) + continue; + + if (!FirstInit || !SecondInit) { + DiagError(FirstConstant, EnumConstantSingleInitializer) + << I + 1 << FirstConstant << (FirstInit != nullptr); + DiagNote(SecondConstant, EnumConstantSingleInitializer) + << I + 1 << SecondConstant << (SecondInit != nullptr); + return true; + } + + if (computeODRHash(FirstInit) != computeODRHash(SecondInit)) { + DiagError(FirstConstant, EnumConstantDifferentInitializer) + << I + 1 << FirstConstant; + DiagNote(SecondConstant, EnumConstantDifferentInitializer) + << I + 1 << SecondConstant; + return true; + } + } + return false; +} + +bool ODRDiagsEmitter::diagnoseMismatch( + const ObjCInterfaceDecl *FirstID, const ObjCInterfaceDecl *SecondID, + const struct ObjCInterfaceDecl::DefinitionData *SecondDD) const { + // Multiple different declarations got merged together; tell the user + // where they came from. + if (FirstID == SecondID) + return false; + + std::string FirstModule = getOwningModuleNameForDiagnostic(FirstID); + std::string SecondModule = getOwningModuleNameForDiagnostic(SecondID); + + // Keep in sync with err_module_odr_violation_objc_interface. 
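// The two enumerators that follow index the %select of
// err_module_odr_violation_objc_interface: SuperClassType covers a super
// class that differs or is present in only one of the definitions, and
// IVarAccess covers an ivar whose access control differs. As a hypothetical
// illustration (not taken from this patch), "@interface I : NSObject" in one
// module and "@interface I : NSProxy" in another is reported through
// SuperClassType.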
+ enum ODRInterfaceDifference { + SuperClassType, + IVarAccess, + }; + + auto DiagError = [FirstID, &FirstModule, + this](SourceLocation Loc, SourceRange Range, + ODRInterfaceDifference DiffType) { + return Diag(Loc, diag::err_module_odr_violation_objc_interface) + << FirstID << FirstModule.empty() << FirstModule << Range + << DiffType; + }; + auto DiagNote = [&SecondModule, this](SourceLocation Loc, SourceRange Range, + ODRInterfaceDifference DiffType) { + return Diag(Loc, diag::note_module_odr_violation_objc_interface) + << SecondModule.empty() << SecondModule << Range << DiffType; + }; + + const struct ObjCInterfaceDecl::DefinitionData *FirstDD = &FirstID->data(); + assert(FirstDD && SecondDD && "Definitions without DefinitionData"); + if (FirstDD != SecondDD) { + // Check for matching super class. + auto GetSuperClassSourceRange = [](const TypeSourceInfo *SuperInfo, + const ObjCInterfaceDecl *ID) { + if (!SuperInfo) + return ID->getSourceRange(); + TypeLoc Loc = SuperInfo->getTypeLoc(); + return SourceRange(Loc.getBeginLoc(), Loc.getEndLoc()); + }; + + ObjCInterfaceDecl *FirstSuperClass = FirstID->getSuperClass(); + ObjCInterfaceDecl *SecondSuperClass = nullptr; + const TypeSourceInfo *FirstSuperInfo = FirstID->getSuperClassTInfo(); + const TypeSourceInfo *SecondSuperInfo = SecondDD->SuperClassTInfo; + if (SecondSuperInfo) + SecondSuperClass = + SecondSuperInfo->getType()->castAs<ObjCObjectType>()->getInterface(); + + if ((FirstSuperClass && SecondSuperClass && + FirstSuperClass->getODRHash() != SecondSuperClass->getODRHash()) || + (FirstSuperClass && !SecondSuperClass) || + (!FirstSuperClass && SecondSuperClass)) { + QualType FirstType; + if (FirstSuperInfo) + FirstType = FirstSuperInfo->getType(); + + DiagError(FirstID->getLocation(), + GetSuperClassSourceRange(FirstSuperInfo, FirstID), + SuperClassType) + << (bool)FirstSuperInfo << FirstType; + + QualType SecondType; + if (SecondSuperInfo) + SecondType = SecondSuperInfo->getType(); + + DiagNote(SecondID->getLocation(), + GetSuperClassSourceRange(SecondSuperInfo, SecondID), + SuperClassType) + << (bool)SecondSuperInfo << SecondType; + return true; + } + + // Check both interfaces reference the same protocols. + auto &FirstProtos = FirstID->getReferencedProtocols(); + auto &SecondProtos = SecondDD->ReferencedProtocols; + if (diagnoseSubMismatchProtocols(FirstProtos, FirstID, FirstModule, + SecondProtos, SecondID, SecondModule)) + return true; + } + + auto PopulateHashes = [](DeclHashes &Hashes, const ObjCInterfaceDecl *ID, + const DeclContext *DC) { + for (auto *D : ID->decls()) { + if (!ODRHash::isSubDeclToBeProcessed(D, DC)) + continue; + Hashes.emplace_back(D, computeODRHash(D)); + } + }; + + DeclHashes FirstHashes; + DeclHashes SecondHashes; + // Use definition as DeclContext because definitions are merged when + // DeclContexts are merged and separate when DeclContexts are separate. 
+ PopulateHashes(FirstHashes, FirstID, FirstID->getDefinition()); + PopulateHashes(SecondHashes, SecondID, SecondID->getDefinition()); + + DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes); + ODRMismatchDecl FirstDiffType = DR.FirstDiffType; + ODRMismatchDecl SecondDiffType = DR.SecondDiffType; + const Decl *FirstDecl = DR.FirstDecl; + const Decl *SecondDecl = DR.SecondDecl; + + if (FirstDiffType == Other || SecondDiffType == Other) { + diagnoseSubMismatchUnexpected(DR, FirstID, FirstModule, SecondID, + SecondModule); + return true; + } + + if (FirstDiffType != SecondDiffType) { + diagnoseSubMismatchDifferentDeclKinds(DR, FirstID, FirstModule, SecondID, + SecondModule); + return true; + } + + assert(FirstDiffType == SecondDiffType); + switch (FirstDiffType) { + // Already handled. + case EndOfClass: + case Other: + // Cannot be contained by ObjCInterfaceDecl, invalid in this context. + case Field: + case TypeDef: + case Var: + // C++ only, invalid in this context. + case PublicSpecifer: + case PrivateSpecifer: + case ProtectedSpecifer: + case StaticAssert: + case CXXMethod: + case TypeAlias: + case Friend: + case FunctionTemplate: + llvm_unreachable("Invalid diff type"); + + case ObjCMethod: { + if (diagnoseSubMismatchObjCMethod(FirstID, FirstModule, SecondModule, + cast<ObjCMethodDecl>(FirstDecl), + cast<ObjCMethodDecl>(SecondDecl))) + return true; + break; + } + case ObjCIvar: { + if (diagnoseSubMismatchField(FirstID, FirstModule, SecondModule, + cast<FieldDecl>(FirstDecl), + cast<FieldDecl>(SecondDecl))) + return true; + + // Check if the access match. + const ObjCIvarDecl *FirstIvar = cast<ObjCIvarDecl>(FirstDecl); + const ObjCIvarDecl *SecondIvar = cast<ObjCIvarDecl>(SecondDecl); + if (FirstIvar->getCanonicalAccessControl() != + SecondIvar->getCanonicalAccessControl()) { + DiagError(FirstIvar->getLocation(), FirstIvar->getSourceRange(), + IVarAccess) + << FirstIvar->getName() + << (int)FirstIvar->getCanonicalAccessControl(); + DiagNote(SecondIvar->getLocation(), SecondIvar->getSourceRange(), + IVarAccess) + << SecondIvar->getName() + << (int)SecondIvar->getCanonicalAccessControl(); + return true; + } + break; + } + case ObjCProperty: { + if (diagnoseSubMismatchObjCProperty(FirstID, FirstModule, SecondModule, + cast<ObjCPropertyDecl>(FirstDecl), + cast<ObjCPropertyDecl>(SecondDecl))) + return true; + break; + } + } + + Diag(FirstDecl->getLocation(), + diag::err_module_odr_violation_mismatch_decl_unknown) + << FirstID << FirstModule.empty() << FirstModule << FirstDiffType + << FirstDecl->getSourceRange(); + Diag(SecondDecl->getLocation(), + diag::note_module_odr_violation_mismatch_decl_unknown) + << SecondModule.empty() << SecondModule << FirstDiffType + << SecondDecl->getSourceRange(); + return true; +} + +bool ODRDiagsEmitter::diagnoseMismatch( + const ObjCProtocolDecl *FirstProtocol, + const ObjCProtocolDecl *SecondProtocol, + const struct ObjCProtocolDecl::DefinitionData *SecondDD) const { + if (FirstProtocol == SecondProtocol) + return false; + + std::string FirstModule = getOwningModuleNameForDiagnostic(FirstProtocol); + std::string SecondModule = getOwningModuleNameForDiagnostic(SecondProtocol); + + const ObjCProtocolDecl::DefinitionData *FirstDD = &FirstProtocol->data(); + assert(FirstDD && SecondDD && "Definitions without DefinitionData"); + // Diagnostics from ObjCProtocol DefinitionData are emitted here. + if (FirstDD != SecondDD) { + // Check both protocols reference the same protocols. 
+ const ObjCProtocolList &FirstProtocols = + FirstProtocol->getReferencedProtocols(); + const ObjCProtocolList &SecondProtocols = SecondDD->ReferencedProtocols; + if (diagnoseSubMismatchProtocols(FirstProtocols, FirstProtocol, FirstModule, + SecondProtocols, SecondProtocol, + SecondModule)) + return true; + } + + auto PopulateHashes = [](DeclHashes &Hashes, const ObjCProtocolDecl *ID, + const DeclContext *DC) { + for (const Decl *D : ID->decls()) { + if (!ODRHash::isSubDeclToBeProcessed(D, DC)) + continue; + Hashes.emplace_back(D, computeODRHash(D)); + } + }; + + DeclHashes FirstHashes; + DeclHashes SecondHashes; + // Use definition as DeclContext because definitions are merged when + // DeclContexts are merged and separate when DeclContexts are separate. + PopulateHashes(FirstHashes, FirstProtocol, FirstProtocol->getDefinition()); + PopulateHashes(SecondHashes, SecondProtocol, SecondProtocol->getDefinition()); + + DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes); + ODRMismatchDecl FirstDiffType = DR.FirstDiffType; + ODRMismatchDecl SecondDiffType = DR.SecondDiffType; + const Decl *FirstDecl = DR.FirstDecl; + const Decl *SecondDecl = DR.SecondDecl; + + if (FirstDiffType == Other || SecondDiffType == Other) { + diagnoseSubMismatchUnexpected(DR, FirstProtocol, FirstModule, + SecondProtocol, SecondModule); + return true; + } + + if (FirstDiffType != SecondDiffType) { + diagnoseSubMismatchDifferentDeclKinds(DR, FirstProtocol, FirstModule, + SecondProtocol, SecondModule); + return true; + } + + assert(FirstDiffType == SecondDiffType); + switch (FirstDiffType) { + // Already handled. + case EndOfClass: + case Other: + // Cannot be contained by ObjCProtocolDecl, invalid in this context. + case Field: + case TypeDef: + case Var: + case ObjCIvar: + // C++ only, invalid in this context. + case PublicSpecifer: + case PrivateSpecifer: + case ProtectedSpecifer: + case StaticAssert: + case CXXMethod: + case TypeAlias: + case Friend: + case FunctionTemplate: + llvm_unreachable("Invalid diff type"); + case ObjCMethod: { + if (diagnoseSubMismatchObjCMethod(FirstProtocol, FirstModule, SecondModule, + cast<ObjCMethodDecl>(FirstDecl), + cast<ObjCMethodDecl>(SecondDecl))) + return true; + break; + } + case ObjCProperty: { + if (diagnoseSubMismatchObjCProperty(FirstProtocol, FirstModule, + SecondModule, + cast<ObjCPropertyDecl>(FirstDecl), + cast<ObjCPropertyDecl>(SecondDecl))) + return true; + break; + } + } + + Diag(FirstDecl->getLocation(), + diag::err_module_odr_violation_mismatch_decl_unknown) + << FirstProtocol << FirstModule.empty() << FirstModule << FirstDiffType + << FirstDecl->getSourceRange(); + Diag(SecondDecl->getLocation(), + diag::note_module_odr_violation_mismatch_decl_unknown) + << SecondModule.empty() << SecondModule << FirstDiffType + << SecondDecl->getSourceRange(); + return true; +} diff --git a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp index 735bcff8f113..fbfe92318dc5 100644 --- a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp +++ b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp @@ -72,7 +72,10 @@ void ODRHash::AddDeclarationNameImpl(DeclarationName Name) { AddBoolean(S.isUnarySelector()); unsigned NumArgs = S.getNumArgs(); ID.AddInteger(NumArgs); - for (unsigned i = 0; i < NumArgs; ++i) { + // Compare all selector slots. For selectors with arguments it means all arg + // slots. And if there are no arguments, compare the first-and-only slot. + unsigned SlotsToCheck = NumArgs > 0 ? 
NumArgs : 1; + for (unsigned i = 0; i < SlotsToCheck; ++i) { const IdentifierInfo *II = S.getIdentifierInfoForSlot(i); AddBoolean(II); if (II) { @@ -143,13 +146,21 @@ void ODRHash::AddTemplateName(TemplateName Name) { case TemplateName::Template: AddDecl(Name.getAsTemplateDecl()); break; + case TemplateName::QualifiedTemplate: { + QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName(); + if (NestedNameSpecifier *NNS = QTN->getQualifier()) + AddNestedNameSpecifier(NNS); + AddBoolean(QTN->hasTemplateKeyword()); + AddTemplateName(QTN->getUnderlyingTemplate()); + break; + } // TODO: Support these cases. case TemplateName::OverloadedTemplate: case TemplateName::AssumedTemplate: - case TemplateName::QualifiedTemplate: case TemplateName::DependentTemplate: case TemplateName::SubstTemplateTemplateParm: case TemplateName::SubstTemplateTemplateParmPack: + case TemplateName::UsingTemplate: break; } } @@ -168,7 +179,17 @@ void ODRHash::AddTemplateArgument(TemplateArgument TA) { AddDecl(TA.getAsDecl()); break; case TemplateArgument::NullPtr: - case TemplateArgument::Integral: + ID.AddPointer(nullptr); + break; + case TemplateArgument::Integral: { + // There are integrals (e.g.: _BitInt(128)) that cannot be represented as + // any builtin integral type, so we use the hash of APSInt instead. + TA.getAsIntegral().Profile(ID); + break; + } + case TemplateArgument::StructuralValue: + AddQualType(TA.getStructuralValueType()); + AddStructuralValue(TA.getAsStructuralValue()); break; case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: @@ -230,7 +251,7 @@ unsigned ODRHash::CalculateHash() { assert(I == Bools.rend()); Bools.clear(); - return ID.ComputeHash(); + return ID.computeStableHash(); } namespace { @@ -285,9 +306,9 @@ public: } void VisitValueDecl(const ValueDecl *D) { - if (!isa<FunctionDecl>(D)) { - AddQualType(D->getType()); - } + if (auto *DD = dyn_cast<DeclaratorDecl>(D); DD && DD->getTypeSourceInfo()) + AddQualType(DD->getTypeSourceInfo()->getType()); + Inherited::VisitValueDecl(D); } @@ -333,6 +354,20 @@ public: Inherited::VisitFieldDecl(D); } + void VisitObjCIvarDecl(const ObjCIvarDecl *D) { + ID.AddInteger(D->getCanonicalAccessControl()); + Inherited::VisitObjCIvarDecl(D); + } + + void VisitObjCPropertyDecl(const ObjCPropertyDecl *D) { + ID.AddInteger(D->getPropertyAttributes()); + ID.AddInteger(D->getPropertyImplementation()); + AddQualType(D->getTypeSourceInfo()->getType()); + AddDecl(D); + + Inherited::VisitObjCPropertyDecl(D); + } + void VisitFunctionDecl(const FunctionDecl *D) { // Handled by the ODRHash for FunctionDecl ID.AddInteger(D->getODRHash()); @@ -346,6 +381,64 @@ public: Inherited::VisitCXXMethodDecl(D); } + void VisitObjCMethodDecl(const ObjCMethodDecl *Method) { + ID.AddInteger(Method->getDeclKind()); + Hash.AddBoolean(Method->isInstanceMethod()); // false if class method + Hash.AddBoolean(Method->isVariadic()); + Hash.AddBoolean(Method->isSynthesizedAccessorStub()); + Hash.AddBoolean(Method->isDefined()); + Hash.AddBoolean(Method->isDirectMethod()); + Hash.AddBoolean(Method->isThisDeclarationADesignatedInitializer()); + Hash.AddBoolean(Method->hasSkippedBody()); + + ID.AddInteger(llvm::to_underlying(Method->getImplementationControl())); + ID.AddInteger(Method->getMethodFamily()); + ImplicitParamDecl *Cmd = Method->getCmdDecl(); + Hash.AddBoolean(Cmd); + if (Cmd) + ID.AddInteger(llvm::to_underlying(Cmd->getParameterKind())); + + ImplicitParamDecl *Self = Method->getSelfDecl(); + Hash.AddBoolean(Self); + if (Self) + 
ID.AddInteger(llvm::to_underlying(Self->getParameterKind())); + + AddDecl(Method); + + if (Method->getReturnTypeSourceInfo()) + AddQualType(Method->getReturnTypeSourceInfo()->getType()); + + ID.AddInteger(Method->param_size()); + for (auto Param : Method->parameters()) + Hash.AddSubDecl(Param); + + if (Method->hasBody()) { + const bool IsDefinition = Method->isThisDeclarationADefinition(); + Hash.AddBoolean(IsDefinition); + if (IsDefinition) { + Stmt *Body = Method->getBody(); + Hash.AddBoolean(Body); + if (Body) + AddStmt(Body); + + // Filter out sub-Decls which will not be processed in order to get an + // accurate count of Decl's. + llvm::SmallVector<const Decl *, 16> Decls; + for (Decl *SubDecl : Method->decls()) + if (ODRHash::isSubDeclToBeProcessed(SubDecl, Method)) + Decls.push_back(SubDecl); + + ID.AddInteger(Decls.size()); + for (auto SubDecl : Decls) + Hash.AddSubDecl(SubDecl); + } + } else { + Hash.AddBoolean(false); + } + + Inherited::VisitObjCMethodDecl(Method); + } + void VisitTypedefNameDecl(const TypedefNameDecl *D) { AddQualType(D->getUnderlyingType()); @@ -376,7 +469,7 @@ public: D->hasDefaultArgument() && !D->defaultArgumentWasInherited(); Hash.AddBoolean(hasDefaultArgument); if (hasDefaultArgument) { - AddTemplateArgument(D->getDefaultArgument()); + AddTemplateArgument(D->getDefaultArgument().getArgument()); } Hash.AddBoolean(D->isParameterPack()); @@ -394,7 +487,7 @@ public: D->hasDefaultArgument() && !D->defaultArgumentWasInherited(); Hash.AddBoolean(hasDefaultArgument); if (hasDefaultArgument) { - AddStmt(D->getDefaultArgument()); + AddTemplateArgument(D->getDefaultArgument().getArgument()); } Hash.AddBoolean(D->isParameterPack()); @@ -440,7 +533,7 @@ public: // Only allow a small portion of Decl's to be processed. Remove this once // all Decl's can be handled. -bool ODRHash::isDeclToBeProcessed(const Decl *D, const DeclContext *Parent) { +bool ODRHash::isSubDeclToBeProcessed(const Decl *D, const DeclContext *Parent) { if (D->isImplicit()) return false; if (D->getDeclContext() != Parent) return false; @@ -459,6 +552,9 @@ bool ODRHash::isDeclToBeProcessed(const Decl *D, const DeclContext *Parent) { case Decl::TypeAlias: case Decl::Typedef: case Decl::Var: + case Decl::ObjCMethod: + case Decl::ObjCIvar: + case Decl::ObjCProperty: return true; } } @@ -487,7 +583,7 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) { // accurate count of Decl's. llvm::SmallVector<const Decl *, 16> Decls; for (Decl *SubDecl : Record->decls()) { - if (isDeclToBeProcessed(SubDecl, Record)) { + if (isSubDeclToBeProcessed(SubDecl, Record)) { Decls.push_back(SubDecl); if (auto *Function = dyn_cast<FunctionDecl>(SubDecl)) { // Compute/Preload ODRHash into FunctionDecl. @@ -509,13 +605,58 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) { ID.AddInteger(Record->getNumBases()); auto Bases = Record->bases(); - for (auto Base : Bases) { - AddQualType(Base.getType()); + for (const auto &Base : Bases) { + AddQualType(Base.getTypeSourceInfo()->getType()); ID.AddInteger(Base.isVirtual()); ID.AddInteger(Base.getAccessSpecifierAsWritten()); } } +void ODRHash::AddRecordDecl(const RecordDecl *Record) { + assert(!isa<CXXRecordDecl>(Record) && + "For CXXRecordDecl should call AddCXXRecordDecl."); + AddDecl(Record); + + // Filter out sub-Decls which will not be processed in order to get an + // accurate count of Decl's. 
+ llvm::SmallVector<const Decl *, 16> Decls; + for (Decl *SubDecl : Record->decls()) { + if (isSubDeclToBeProcessed(SubDecl, Record)) + Decls.push_back(SubDecl); + } + + ID.AddInteger(Decls.size()); + for (const Decl *SubDecl : Decls) + AddSubDecl(SubDecl); +} + +void ODRHash::AddObjCInterfaceDecl(const ObjCInterfaceDecl *IF) { + AddDecl(IF); + + auto *SuperClass = IF->getSuperClass(); + AddBoolean(SuperClass); + if (SuperClass) + ID.AddInteger(SuperClass->getODRHash()); + + // Hash referenced protocols. + ID.AddInteger(IF->getReferencedProtocols().size()); + for (const ObjCProtocolDecl *RefP : IF->protocols()) { + // Hash the name only as a referenced protocol can be a forward declaration. + AddDeclarationName(RefP->getDeclName()); + } + + // Filter out sub-Decls which will not be processed in order to get an + // accurate count of Decl's. + llvm::SmallVector<const Decl *, 16> Decls; + for (Decl *SubDecl : IF->decls()) + if (isSubDeclToBeProcessed(SubDecl, IF)) + Decls.push_back(SubDecl); + + ID.AddInteger(Decls.size()); + for (auto *SubDecl : Decls) + AddSubDecl(SubDecl); +} + void ODRHash::AddFunctionDecl(const FunctionDecl *Function, bool SkipBody) { assert(Function && "Expecting non-null pointer."); @@ -528,6 +669,10 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function, if (F->isFunctionTemplateSpecialization()) { if (!isa<CXXMethodDecl>(DC)) return; if (DC->getLexicalParent()->isFileContext()) return; + // Skip class scope explicit function template specializations, + // as they have not yet been instantiated. + if (F->getDependentSpecializationInfo()) + return; // Inline method specializations are the only supported // specialization for now. } @@ -554,16 +699,22 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function, ID.AddInteger(Function->getStorageClass()); AddBoolean(Function->isInlineSpecified()); AddBoolean(Function->isVirtualAsWritten()); - AddBoolean(Function->isPure()); + AddBoolean(Function->isPureVirtual()); AddBoolean(Function->isDeletedAsWritten()); AddBoolean(Function->isExplicitlyDefaulted()); + StringLiteral *DeletedMessage = Function->getDeletedMessage(); + AddBoolean(DeletedMessage); + + if (DeletedMessage) + ID.AddString(DeletedMessage->getBytes()); + AddDecl(Function); AddQualType(Function->getReturnType()); ID.AddInteger(Function->param_size()); - for (auto Param : Function->parameters()) + for (auto *Param : Function->parameters()) AddSubDecl(Param); if (SkipBody) { @@ -588,7 +739,7 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function, // accurate count of Decl's. llvm::SmallVector<const Decl *, 16> Decls; for (Decl *SubDecl : Function->decls()) { - if (isDeclToBeProcessed(SubDecl, Function)) { + if (isSubDeclToBeProcessed(SubDecl, Function)) { Decls.push_back(SubDecl); } } @@ -608,13 +759,13 @@ void ODRHash::AddEnumDecl(const EnumDecl *Enum) { AddBoolean(Enum->isScopedUsingClassTag()); if (Enum->getIntegerTypeSourceInfo()) - AddQualType(Enum->getIntegerType()); + AddQualType(Enum->getIntegerType().getCanonicalType()); // Filter out sub-Decls which will not be processed in order to get an // accurate count of Decl's. 
llvm::SmallVector<const Decl *, 16> Decls; for (Decl *SubDecl : Enum->decls()) { - if (isDeclToBeProcessed(SubDecl, Enum)) { + if (isSubDeclToBeProcessed(SubDecl, Enum)) { assert(isa<EnumConstantDecl>(SubDecl) && "Unexpected Decl"); Decls.push_back(SubDecl); } @@ -627,6 +778,31 @@ void ODRHash::AddEnumDecl(const EnumDecl *Enum) { } +void ODRHash::AddObjCProtocolDecl(const ObjCProtocolDecl *P) { + AddDecl(P); + + // Hash referenced protocols. + ID.AddInteger(P->getReferencedProtocols().size()); + for (const ObjCProtocolDecl *RefP : P->protocols()) { + // Hash the name only as a referenced protocol can be a forward declaration. + AddDeclarationName(RefP->getDeclName()); + } + + // Filter out sub-Decls which will not be processed in order to get an + // accurate count of Decl's. + llvm::SmallVector<const Decl *, 16> Decls; + for (Decl *SubDecl : P->decls()) { + if (isSubDeclToBeProcessed(SubDecl, P)) { + Decls.push_back(SubDecl); + } + } + + ID.AddInteger(Decls.size()); + for (auto *SubDecl : Decls) { + AddSubDecl(SubDecl); + } +} + void ODRHash::AddDecl(const Decl *D) { assert(D && "Expecting non-null pointer."); D = D->getCanonicalDecl(); @@ -670,7 +846,7 @@ public: } } - void AddDecl(Decl *D) { + void AddDecl(const Decl *D) { Hash.AddBoolean(D); if (D) { Hash.AddDecl(D); @@ -759,29 +935,7 @@ public: void VisitType(const Type *T) {} void VisitAdjustedType(const AdjustedType *T) { - QualType Original = T->getOriginalType(); - QualType Adjusted = T->getAdjustedType(); - - // The original type and pointee type can be the same, as in the case of - // function pointers decaying to themselves. Set a bool and only process - // the type once, to prevent doubling the work. - SplitQualType split = Adjusted.split(); - if (auto Pointer = dyn_cast<PointerType>(split.Ty)) { - if (Pointer->getPointeeType() == Original) { - Hash.AddBoolean(true); - ID.AddInteger(split.Quals.getAsOpaqueValue()); - AddQualType(Original); - VisitType(T); - return; - } - } - - // The original type and pointee type are different, such as in the case - // of a array decaying to an element pointer. Set a bool to false and - // process both types. 
- Hash.AddBoolean(false); - AddQualType(Original); - AddQualType(Adjusted); + AddQualType(T->getOriginalType()); VisitType(T); } @@ -794,7 +948,7 @@ public: void VisitArrayType(const ArrayType *T) { AddQualType(T->getElementType()); - ID.AddInteger(T->getSizeModifier()); + ID.AddInteger(llvm::to_underlying(T->getSizeModifier())); VisitQualifiers(T->getIndexTypeQualifiers()); VisitType(T); } @@ -803,6 +957,10 @@ public: VisitArrayType(T); } + void VisitArrayParameterType(const ArrayParameterType *T) { + VisitConstantArrayType(T); + } + void VisitDependentSizedArrayType(const DependentSizedArrayType *T) { AddStmt(T->getSizeExpr()); VisitArrayType(T); @@ -820,7 +978,6 @@ public: void VisitAttributedType(const AttributedType *T) { ID.AddInteger(T->getAttrKind()); AddQualType(T->getModifiedType()); - AddQualType(T->getEquivalentType()); VisitType(T); } @@ -842,7 +999,6 @@ public: void VisitDecltypeType(const DecltypeType *T) { AddStmt(T->getUnderlyingExpr()); - AddQualType(T->getUnderlyingType()); VisitType(T); } @@ -860,7 +1016,7 @@ public: ID.AddInteger(T->isConstrained()); if (T->isConstrained()) { AddDecl(T->getTypeConstraintConcept()); - ID.AddInteger(T->getNumArgs()); + ID.AddInteger(T->getTypeConstraintArguments().size()); for (const auto &TA : T->getTypeConstraintArguments()) Hash.AddTemplateArgument(TA); } @@ -933,7 +1089,7 @@ public: auto Protocols = T->getProtocols(); ID.AddInteger(Protocols.size()); - for (auto Protocol : Protocols) { + for (auto *Protocol : Protocols) { AddDecl(Protocol); } @@ -951,7 +1107,7 @@ public: AddDecl(T->getDecl()); auto Protocols = T->getProtocols(); ID.AddInteger(Protocols.size()); - for (auto Protocol : Protocols) { + for (auto *Protocol : Protocols) { AddDecl(Protocol); } @@ -994,13 +1150,13 @@ public: void VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) { - AddType(T->getReplacedParameter()); + AddDecl(T->getAssociatedDecl()); Hash.AddTemplateArgument(T->getArgumentPack()); VisitType(T); } void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) { - AddType(T->getReplacedParameter()); + AddDecl(T->getAssociatedDecl()); AddQualType(T->getReplacementType()); VisitType(T); } @@ -1014,7 +1170,7 @@ public: void VisitEnumType(const EnumType *T) { VisitTagType(T); } void VisitTemplateSpecializationType(const TemplateSpecializationType *T) { - ID.AddInteger(T->getNumArgs()); + ID.AddInteger(T->template_arguments().size()); for (const auto &TA : T->template_arguments()) { Hash.AddTemplateArgument(TA); } @@ -1031,41 +1187,22 @@ public: void VisitTypedefType(const TypedefType *T) { AddDecl(T->getDecl()); - QualType UnderlyingType = T->getDecl()->getUnderlyingType(); - VisitQualifiers(UnderlyingType.getQualifiers()); - while (true) { - if (const TypedefType *Underlying = - dyn_cast<TypedefType>(UnderlyingType.getTypePtr())) { - UnderlyingType = Underlying->getDecl()->getUnderlyingType(); - continue; - } - if (const ElaboratedType *Underlying = - dyn_cast<ElaboratedType>(UnderlyingType.getTypePtr())) { - UnderlyingType = Underlying->getNamedType(); - continue; - } - - break; - } - AddType(UnderlyingType.getTypePtr()); VisitType(T); } void VisitTypeOfExprType(const TypeOfExprType *T) { AddStmt(T->getUnderlyingExpr()); Hash.AddBoolean(T->isSugared()); - if (T->isSugared()) - AddQualType(T->desugar()); VisitType(T); } void VisitTypeOfType(const TypeOfType *T) { - AddQualType(T->getUnderlyingType()); + AddQualType(T->getUnmodifiedType()); VisitType(T); } void VisitTypeWithKeyword(const TypeWithKeyword *T) { - 
ID.AddInteger(T->getKeyword()); + ID.AddInteger(llvm::to_underlying(T->getKeyword())); VisitType(T); }; @@ -1079,7 +1216,7 @@ public: const DependentTemplateSpecializationType *T) { AddIdentifierInfo(T->getIdentifier()); AddNestedNameSpecifier(T->getQualifier()); - ID.AddInteger(T->getNumArgs()); + ID.AddInteger(T->template_arguments().size()); for (const auto &TA : T->template_arguments()) { Hash.AddTemplateArgument(TA); } @@ -1106,7 +1243,7 @@ public: void VisitVectorType(const VectorType *T) { AddQualType(T->getElementType()); ID.AddInteger(T->getNumElements()); - ID.AddInteger(T->getVectorKind()); + ID.AddInteger(llvm::to_underlying(T->getVectorKind())); VisitType(T); } @@ -1133,3 +1270,66 @@ void ODRHash::AddQualType(QualType T) { void ODRHash::AddBoolean(bool Value) { Bools.push_back(Value); } + +void ODRHash::AddStructuralValue(const APValue &Value) { + ID.AddInteger(Value.getKind()); + + // 'APValue::Profile' uses pointer values to make hash for LValue and + // MemberPointer, but they differ from one compiler invocation to another. + // So, handle them explicitly here. + + switch (Value.getKind()) { + case APValue::LValue: { + const APValue::LValueBase &Base = Value.getLValueBase(); + if (!Base) { + ID.AddInteger(Value.getLValueOffset().getQuantity()); + break; + } + + assert(Base.is<const ValueDecl *>()); + AddDecl(Base.get<const ValueDecl *>()); + ID.AddInteger(Value.getLValueOffset().getQuantity()); + + bool OnePastTheEnd = Value.isLValueOnePastTheEnd(); + if (Value.hasLValuePath()) { + QualType TypeSoFar = Base.getType(); + for (APValue::LValuePathEntry E : Value.getLValuePath()) { + if (const auto *AT = TypeSoFar->getAsArrayTypeUnsafe()) { + if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) + OnePastTheEnd |= CAT->getSize() == E.getAsArrayIndex(); + TypeSoFar = AT->getElementType(); + } else { + const Decl *D = E.getAsBaseOrMember().getPointer(); + if (const auto *FD = dyn_cast<FieldDecl>(D)) { + if (FD->getParent()->isUnion()) + ID.AddInteger(FD->getFieldIndex()); + TypeSoFar = FD->getType(); + } else { + TypeSoFar = + D->getASTContext().getRecordType(cast<CXXRecordDecl>(D)); + } + } + } + } + unsigned Val = 0; + if (Value.isNullPointer()) + Val |= 1 << 0; + if (OnePastTheEnd) + Val |= 1 << 1; + if (Value.hasLValuePath()) + Val |= 1 << 2; + ID.AddInteger(Val); + break; + } + case APValue::MemberPointer: { + const ValueDecl *D = Value.getMemberPointerDecl(); + assert(D); + AddDecl(D); + ID.AddInteger( + D->getASTContext().getMemberPointerPathAdjustment(Value).getQuantity()); + break; + } + default: + Value.Profile(ID); + } +} diff --git a/contrib/llvm-project/clang/lib/AST/OSLog.cpp b/contrib/llvm-project/clang/lib/AST/OSLog.cpp index 094c0102854b..5e320416b30d 100644 --- a/contrib/llvm-project/clang/lib/AST/OSLog.cpp +++ b/contrib/llvm-project/clang/lib/AST/OSLog.cpp @@ -8,6 +8,7 @@ #include "clang/AST/FormatString.h" #include "clang/Basic/Builtins.h" #include "llvm/ADT/SmallBitVector.h" +#include <optional> using namespace clang; @@ -20,11 +21,11 @@ class OSLogFormatStringHandler private: struct ArgData { const Expr *E = nullptr; - Optional<OSLogBufferItem::Kind> Kind; - Optional<unsigned> Size; - Optional<const Expr *> Count; - Optional<const Expr *> Precision; - Optional<const Expr *> FieldWidth; + std::optional<OSLogBufferItem::Kind> Kind; + std::optional<unsigned> Size; + std::optional<const Expr *> Count; + std::optional<const Expr *> Precision; + std::optional<const Expr *> FieldWidth; unsigned char Flags = 0; StringRef MaskType; }; @@ -56,8 +57,8 @@ public: } 
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, - const char *StartSpecifier, - unsigned SpecifierLen) override { + const char *StartSpecifier, unsigned SpecifierLen, + const TargetInfo &) override { if (!FS.consumesDataArgument() && FS.getConversionSpecifier().getKind() != clang::analyze_format_string::ConversionSpecifier::PrintErrno) @@ -201,7 +202,7 @@ bool clang::analyze_os_log::computeOSLogBufferLayout( } const StringLiteral *Lit = cast<StringLiteral>(StringArg->IgnoreParenCasts()); - assert(Lit && (Lit->isAscii() || Lit->isUTF8())); + assert(Lit && (Lit->isOrdinary() || Lit->isUTF8())); StringRef Data = Lit->getString(); OSLogFormatStringHandler H(VarArgs); ParsePrintfString(H, Data.begin(), Data.end(), Ctx.getLangOpts(), diff --git a/contrib/llvm-project/clang/lib/AST/OpenACCClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenACCClause.cpp new file mode 100644 index 000000000000..95089a9b79e2 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/OpenACCClause.cpp @@ -0,0 +1,552 @@ +//===---- OpenACCClause.cpp - Classes for OpenACC Clauses ----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the subclasses of the OpenACCClause class declared in +// OpenACCClause.h +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/OpenACCClause.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Expr.h" + +using namespace clang; + +bool OpenACCClauseWithParams::classof(const OpenACCClause *C) { + return OpenACCDeviceTypeClause::classof(C) || + OpenACCClauseWithCondition::classof(C) || + OpenACCClauseWithExprs::classof(C); +} +bool OpenACCClauseWithExprs::classof(const OpenACCClause *C) { + return OpenACCWaitClause::classof(C) || OpenACCNumGangsClause::classof(C) || + OpenACCClauseWithSingleIntExpr::classof(C) || + OpenACCClauseWithVarList::classof(C); +} +bool OpenACCClauseWithVarList::classof(const OpenACCClause *C) { + return OpenACCPrivateClause::classof(C) || + OpenACCFirstPrivateClause::classof(C) || + OpenACCDevicePtrClause::classof(C) || + OpenACCDevicePtrClause::classof(C) || + OpenACCAttachClause::classof(C) || OpenACCNoCreateClause::classof(C) || + OpenACCPresentClause::classof(C) || OpenACCCopyClause::classof(C) || + OpenACCCopyInClause::classof(C) || OpenACCCopyOutClause::classof(C) || + OpenACCReductionClause::classof(C) || OpenACCCreateClause::classof(C); +} +bool OpenACCClauseWithCondition::classof(const OpenACCClause *C) { + return OpenACCIfClause::classof(C) || OpenACCSelfClause::classof(C); +} +bool OpenACCClauseWithSingleIntExpr::classof(const OpenACCClause *C) { + return OpenACCNumWorkersClause::classof(C) || + OpenACCVectorLengthClause::classof(C) || + OpenACCAsyncClause::classof(C); +} +OpenACCDefaultClause *OpenACCDefaultClause::Create(const ASTContext &C, + OpenACCDefaultClauseKind K, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { + void *Mem = + C.Allocate(sizeof(OpenACCDefaultClause), alignof(OpenACCDefaultClause)); + + return new (Mem) OpenACCDefaultClause(K, BeginLoc, LParenLoc, EndLoc); +} + +OpenACCIfClause *OpenACCIfClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *ConditionExpr, + SourceLocation EndLoc) { + void 
*Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause)); + return new (Mem) OpenACCIfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc); +} + +OpenACCIfClause::OpenACCIfClause(SourceLocation BeginLoc, + SourceLocation LParenLoc, Expr *ConditionExpr, + SourceLocation EndLoc) + : OpenACCClauseWithCondition(OpenACCClauseKind::If, BeginLoc, LParenLoc, + ConditionExpr, EndLoc) { + assert(ConditionExpr && "if clause requires condition expr"); + assert((ConditionExpr->isInstantiationDependent() || + ConditionExpr->getType()->isScalarType()) && + "Condition expression type not scalar/dependent"); +} + +OpenACCSelfClause *OpenACCSelfClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *ConditionExpr, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCIfClause), alignof(OpenACCIfClause)); + return new (Mem) + OpenACCSelfClause(BeginLoc, LParenLoc, ConditionExpr, EndLoc); +} + +OpenACCSelfClause::OpenACCSelfClause(SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *ConditionExpr, SourceLocation EndLoc) + : OpenACCClauseWithCondition(OpenACCClauseKind::Self, BeginLoc, LParenLoc, + ConditionExpr, EndLoc) { + assert((!ConditionExpr || ConditionExpr->isInstantiationDependent() || + ConditionExpr->getType()->isScalarType()) && + "Condition expression type not scalar/dependent"); +} + +OpenACCClause::child_range OpenACCClause::children() { + switch (getClauseKind()) { + default: + assert(false && "Clause children function not implemented"); + break; +#define VISIT_CLAUSE(CLAUSE_NAME) \ + case OpenACCClauseKind::CLAUSE_NAME: \ + return cast<OpenACC##CLAUSE_NAME##Clause>(this)->children(); +#define CLAUSE_ALIAS(ALIAS_NAME, CLAUSE_NAME, DEPRECATED) \ + case OpenACCClauseKind::ALIAS_NAME: \ + return cast<OpenACC##CLAUSE_NAME##Clause>(this)->children(); + +#include "clang/Basic/OpenACCClauses.def" + } + return child_range(child_iterator(), child_iterator()); +} + +OpenACCNumWorkersClause::OpenACCNumWorkersClause(SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *IntExpr, + SourceLocation EndLoc) + : OpenACCClauseWithSingleIntExpr(OpenACCClauseKind::NumWorkers, BeginLoc, + LParenLoc, IntExpr, EndLoc) { + assert((!IntExpr || IntExpr->isInstantiationDependent() || + IntExpr->getType()->isIntegerType()) && + "Condition expression type not scalar/dependent"); +} + +OpenACCNumWorkersClause * +OpenACCNumWorkersClause::Create(const ASTContext &C, SourceLocation BeginLoc, + SourceLocation LParenLoc, Expr *IntExpr, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCNumWorkersClause), + alignof(OpenACCNumWorkersClause)); + return new (Mem) + OpenACCNumWorkersClause(BeginLoc, LParenLoc, IntExpr, EndLoc); +} + +OpenACCVectorLengthClause::OpenACCVectorLengthClause(SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *IntExpr, + SourceLocation EndLoc) + : OpenACCClauseWithSingleIntExpr(OpenACCClauseKind::VectorLength, BeginLoc, + LParenLoc, IntExpr, EndLoc) { + assert((!IntExpr || IntExpr->isInstantiationDependent() || + IntExpr->getType()->isIntegerType()) && + "Condition expression type not scalar/dependent"); +} + +OpenACCVectorLengthClause * +OpenACCVectorLengthClause::Create(const ASTContext &C, SourceLocation BeginLoc, + SourceLocation LParenLoc, Expr *IntExpr, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCVectorLengthClause), + alignof(OpenACCVectorLengthClause)); + return new (Mem) + OpenACCVectorLengthClause(BeginLoc, LParenLoc, IntExpr, EndLoc); +} + 
+OpenACCAsyncClause::OpenACCAsyncClause(SourceLocation BeginLoc, + SourceLocation LParenLoc, Expr *IntExpr, + SourceLocation EndLoc) + : OpenACCClauseWithSingleIntExpr(OpenACCClauseKind::Async, BeginLoc, + LParenLoc, IntExpr, EndLoc) { + assert((!IntExpr || IntExpr->isInstantiationDependent() || + IntExpr->getType()->isIntegerType()) && + "Condition expression type not scalar/dependent"); +} + +OpenACCAsyncClause *OpenACCAsyncClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + Expr *IntExpr, + SourceLocation EndLoc) { + void *Mem = + C.Allocate(sizeof(OpenACCAsyncClause), alignof(OpenACCAsyncClause)); + return new (Mem) OpenACCAsyncClause(BeginLoc, LParenLoc, IntExpr, EndLoc); +} + +OpenACCWaitClause *OpenACCWaitClause::Create( + const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc, + Expr *DevNumExpr, SourceLocation QueuesLoc, ArrayRef<Expr *> QueueIdExprs, + SourceLocation EndLoc) { + // Allocates enough room in trailing storage for all the int-exprs, plus a + // placeholder for the devnum. + void *Mem = C.Allocate( + OpenACCWaitClause::totalSizeToAlloc<Expr *>(QueueIdExprs.size() + 1)); + return new (Mem) OpenACCWaitClause(BeginLoc, LParenLoc, DevNumExpr, QueuesLoc, + QueueIdExprs, EndLoc); +} + +OpenACCNumGangsClause *OpenACCNumGangsClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + ArrayRef<Expr *> IntExprs, + SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCNumGangsClause::totalSizeToAlloc<Expr *>(IntExprs.size())); + return new (Mem) OpenACCNumGangsClause(BeginLoc, LParenLoc, IntExprs, EndLoc); +} + +OpenACCPrivateClause *OpenACCPrivateClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCPrivateClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCPrivateClause(BeginLoc, LParenLoc, VarList, EndLoc); +} + +OpenACCFirstPrivateClause *OpenACCFirstPrivateClause::Create( + const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc, + ArrayRef<Expr *> VarList, SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCFirstPrivateClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) + OpenACCFirstPrivateClause(BeginLoc, LParenLoc, VarList, EndLoc); +} + +OpenACCAttachClause *OpenACCAttachClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = + C.Allocate(OpenACCAttachClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCAttachClause(BeginLoc, LParenLoc, VarList, EndLoc); +} + +OpenACCDevicePtrClause *OpenACCDevicePtrClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCDevicePtrClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCDevicePtrClause(BeginLoc, LParenLoc, VarList, EndLoc); +} + +OpenACCNoCreateClause *OpenACCNoCreateClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation LParenLoc, + ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCNoCreateClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCNoCreateClause(BeginLoc, LParenLoc, VarList, EndLoc); +} + +OpenACCPresentClause *OpenACCPresentClause::Create(const ASTContext &C, + 
SourceLocation BeginLoc, + SourceLocation LParenLoc, + ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCPresentClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCPresentClause(BeginLoc, LParenLoc, VarList, EndLoc); +} + +OpenACCCopyClause * +OpenACCCopyClause::Create(const ASTContext &C, OpenACCClauseKind Spelling, + SourceLocation BeginLoc, SourceLocation LParenLoc, + ArrayRef<Expr *> VarList, SourceLocation EndLoc) { + void *Mem = + C.Allocate(OpenACCCopyClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) + OpenACCCopyClause(Spelling, BeginLoc, LParenLoc, VarList, EndLoc); +} + +OpenACCCopyInClause * +OpenACCCopyInClause::Create(const ASTContext &C, OpenACCClauseKind Spelling, + SourceLocation BeginLoc, SourceLocation LParenLoc, + bool IsReadOnly, ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = + C.Allocate(OpenACCCopyInClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCCopyInClause(Spelling, BeginLoc, LParenLoc, + IsReadOnly, VarList, EndLoc); +} + +OpenACCCopyOutClause * +OpenACCCopyOutClause::Create(const ASTContext &C, OpenACCClauseKind Spelling, + SourceLocation BeginLoc, SourceLocation LParenLoc, + bool IsZero, ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCCopyOutClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCCopyOutClause(Spelling, BeginLoc, LParenLoc, IsZero, + VarList, EndLoc); +} + +OpenACCCreateClause * +OpenACCCreateClause::Create(const ASTContext &C, OpenACCClauseKind Spelling, + SourceLocation BeginLoc, SourceLocation LParenLoc, + bool IsZero, ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = + C.Allocate(OpenACCCreateClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) OpenACCCreateClause(Spelling, BeginLoc, LParenLoc, IsZero, + VarList, EndLoc); +} + +OpenACCDeviceTypeClause *OpenACCDeviceTypeClause::Create( + const ASTContext &C, OpenACCClauseKind K, SourceLocation BeginLoc, + SourceLocation LParenLoc, ArrayRef<DeviceTypeArgument> Archs, + SourceLocation EndLoc) { + void *Mem = + C.Allocate(OpenACCDeviceTypeClause::totalSizeToAlloc<DeviceTypeArgument>( + Archs.size())); + return new (Mem) + OpenACCDeviceTypeClause(K, BeginLoc, LParenLoc, Archs, EndLoc); +} + +OpenACCReductionClause *OpenACCReductionClause::Create( + const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc, + OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList, + SourceLocation EndLoc) { + void *Mem = C.Allocate( + OpenACCReductionClause::totalSizeToAlloc<Expr *>(VarList.size())); + return new (Mem) + OpenACCReductionClause(BeginLoc, LParenLoc, Operator, VarList, EndLoc); +} + +OpenACCAutoClause *OpenACCAutoClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCAutoClause)); + return new (Mem) OpenACCAutoClause(BeginLoc, EndLoc); +} + +OpenACCIndependentClause * +OpenACCIndependentClause::Create(const ASTContext &C, SourceLocation BeginLoc, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCIndependentClause)); + return new (Mem) OpenACCIndependentClause(BeginLoc, EndLoc); +} + +OpenACCSeqClause *OpenACCSeqClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCSeqClause)); + return new (Mem) OpenACCSeqClause(BeginLoc, EndLoc); +} + +OpenACCGangClause *OpenACCGangClause::Create(const ASTContext 
&C, + SourceLocation BeginLoc, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCGangClause)); + return new (Mem) OpenACCGangClause(BeginLoc, EndLoc); +} + +OpenACCWorkerClause *OpenACCWorkerClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCWorkerClause)); + return new (Mem) OpenACCWorkerClause(BeginLoc, EndLoc); +} + +OpenACCVectorClause *OpenACCVectorClause::Create(const ASTContext &C, + SourceLocation BeginLoc, + SourceLocation EndLoc) { + void *Mem = C.Allocate(sizeof(OpenACCVectorClause)); + return new (Mem) OpenACCVectorClause(BeginLoc, EndLoc); +} + +//===----------------------------------------------------------------------===// +// OpenACC clauses printing methods +//===----------------------------------------------------------------------===// + +void OpenACCClausePrinter::printExpr(const Expr *E) { + E->printPretty(OS, nullptr, Policy, 0); +} + +void OpenACCClausePrinter::VisitDefaultClause(const OpenACCDefaultClause &C) { + OS << "default(" << C.getDefaultClauseKind() << ")"; +} + +void OpenACCClausePrinter::VisitIfClause(const OpenACCIfClause &C) { + OS << "if("; + printExpr(C.getConditionExpr()); + OS << ")"; +} + +void OpenACCClausePrinter::VisitSelfClause(const OpenACCSelfClause &C) { + OS << "self"; + if (const Expr *CondExpr = C.getConditionExpr()) { + OS << "("; + printExpr(CondExpr); + OS << ")"; + } +} + +void OpenACCClausePrinter::VisitNumGangsClause(const OpenACCNumGangsClause &C) { + OS << "num_gangs("; + llvm::interleaveComma(C.getIntExprs(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitNumWorkersClause( + const OpenACCNumWorkersClause &C) { + OS << "num_workers("; + printExpr(C.getIntExpr()); + OS << ")"; +} + +void OpenACCClausePrinter::VisitVectorLengthClause( + const OpenACCVectorLengthClause &C) { + OS << "vector_length("; + printExpr(C.getIntExpr()); + OS << ")"; +} + +void OpenACCClausePrinter::VisitAsyncClause(const OpenACCAsyncClause &C) { + OS << "async"; + if (C.hasIntExpr()) { + OS << "("; + printExpr(C.getIntExpr()); + OS << ")"; + } +} + +void OpenACCClausePrinter::VisitPrivateClause(const OpenACCPrivateClause &C) { + OS << "private("; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitFirstPrivateClause( + const OpenACCFirstPrivateClause &C) { + OS << "firstprivate("; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitAttachClause(const OpenACCAttachClause &C) { + OS << "attach("; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitDevicePtrClause( + const OpenACCDevicePtrClause &C) { + OS << "deviceptr("; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitNoCreateClause(const OpenACCNoCreateClause &C) { + OS << "no_create("; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitPresentClause(const OpenACCPresentClause &C) { + OS << "present("; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitCopyClause(const OpenACCCopyClause &C) { + OS << C.getClauseKind() << '('; + llvm::interleaveComma(C.getVarList(), OS, 
+ [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitCopyInClause(const OpenACCCopyInClause &C) { + OS << C.getClauseKind() << '('; + if (C.isReadOnly()) + OS << "readonly: "; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitCopyOutClause(const OpenACCCopyOutClause &C) { + OS << C.getClauseKind() << '('; + if (C.isZero()) + OS << "zero: "; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitCreateClause(const OpenACCCreateClause &C) { + OS << C.getClauseKind() << '('; + if (C.isZero()) + OS << "zero: "; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitReductionClause( + const OpenACCReductionClause &C) { + OS << "reduction(" << C.getReductionOp() << ": "; + llvm::interleaveComma(C.getVarList(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitWaitClause(const OpenACCWaitClause &C) { + OS << "wait"; + if (!C.getLParenLoc().isInvalid()) { + OS << "("; + if (C.hasDevNumExpr()) { + OS << "devnum: "; + printExpr(C.getDevNumExpr()); + OS << " : "; + } + + if (C.hasQueuesTag()) + OS << "queues: "; + + llvm::interleaveComma(C.getQueueIdExprs(), OS, + [&](const Expr *E) { printExpr(E); }); + OS << ")"; + } +} + +void OpenACCClausePrinter::VisitDeviceTypeClause( + const OpenACCDeviceTypeClause &C) { + OS << C.getClauseKind(); + OS << "("; + llvm::interleaveComma(C.getArchitectures(), OS, + [&](const DeviceTypeArgument &Arch) { + if (Arch.first == nullptr) + OS << "*"; + else + OS << Arch.first->getName(); + }); + OS << ")"; +} + +void OpenACCClausePrinter::VisitAutoClause(const OpenACCAutoClause &C) { + OS << "auto"; +} + +void OpenACCClausePrinter::VisitIndependentClause( + const OpenACCIndependentClause &C) { + OS << "independent"; +} + +void OpenACCClausePrinter::VisitSeqClause(const OpenACCSeqClause &C) { + OS << "seq"; +} diff --git a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp index 50f40395a197..042a5df5906c 100644 --- a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp +++ b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp @@ -23,6 +23,7 @@ #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> +#include <optional> using namespace clang; using namespace llvm; @@ -102,6 +103,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) { return static_cast<const OMPNocontextClause *>(C); case OMPC_filter: return static_cast<const OMPFilterClause *>(C); + case OMPC_ompx_dyn_cgroup_mem: + return static_cast<const OMPXDynCGroupMemClause *>(C); case OMPC_default: case OMPC_proc_bind: case OMPC_safelen: @@ -126,6 +129,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) { case OMPC_write: case OMPC_update: case OMPC_capture: + case OMPC_compare: + case OMPC_fail: case OMPC_seq_cst: case OMPC_acq_rel: case OMPC_acquire: @@ -145,11 +150,15 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) { case OMPC_use_device_ptr: case OMPC_use_device_addr: case OMPC_is_device_ptr: + case OMPC_has_device_addr: case OMPC_unified_address: case OMPC_unified_shared_memory: case OMPC_reverse_offload: case OMPC_dynamic_allocators: case OMPC_atomic_default_mem_order: + case OMPC_at: + case OMPC_severity: + case OMPC_message: 
case OMPC_device_type: case OMPC_match: case OMPC_nontemporal: @@ -160,6 +169,9 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) { case OMPC_exclusive: case OMPC_uses_allocators: case OMPC_affinity: + case OMPC_when: + case OMPC_bind: + case OMPC_ompx_bare: break; default: break; @@ -215,6 +227,8 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C) case OMPC_write: case OMPC_update: case OMPC_capture: + case OMPC_compare: + case OMPC_fail: case OMPC_seq_cst: case OMPC_acq_rel: case OMPC_acquire: @@ -240,11 +254,15 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C) case OMPC_use_device_ptr: case OMPC_use_device_addr: case OMPC_is_device_ptr: + case OMPC_has_device_addr: case OMPC_unified_address: case OMPC_unified_shared_memory: case OMPC_reverse_offload: case OMPC_dynamic_allocators: case OMPC_atomic_default_mem_order: + case OMPC_at: + case OMPC_severity: + case OMPC_message: case OMPC_device_type: case OMPC_match: case OMPC_nontemporal: @@ -257,6 +275,8 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C) case OMPC_exclusive: case OMPC_uses_allocators: case OMPC_affinity: + case OMPC_when: + case OMPC_bind: break; default: break; @@ -299,7 +319,7 @@ OMPClause::child_range OMPNumTasksClause::used_children() { OMPClause::child_range OMPFinalClause::used_children() { if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt())) return child_range(C, C + 1); - return child_range(&Condition, &Condition + 1); + return children(); } OMPClause::child_range OMPPriorityClause::used_children() { @@ -311,13 +331,13 @@ OMPClause::child_range OMPPriorityClause::used_children() { OMPClause::child_range OMPNovariantsClause::used_children() { if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt())) return child_range(C, C + 1); - return child_range(&Condition, &Condition + 1); + return children(); } OMPClause::child_range OMPNocontextClause::used_children() { if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt())) return child_range(C, C + 1); - return child_range(&Condition, &Condition + 1); + return children(); } OMPOrderedClause *OMPOrderedClause::Create(const ASTContext &C, Expr *Num, @@ -353,7 +373,7 @@ void OMPOrderedClause::setLoopNumIterations(unsigned NumLoop, } ArrayRef<Expr *> OMPOrderedClause::getLoopNumIterations() const { - return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumberOfLoops); + return llvm::ArrayRef(getTrailingObjects<Expr *>(), NumberOfLoops); } void OMPOrderedClause::setLoopCounter(unsigned NumLoop, Expr *Counter) { @@ -565,15 +585,17 @@ void OMPLinearClause::setUsedExprs(ArrayRef<Expr *> UE) { OMPLinearClause *OMPLinearClause::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, - SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, - ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, - Stmt *PreInit, Expr *PostUpdate) { + SourceLocation ColonLoc, SourceLocation StepModifierLoc, + SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL, + ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, + Expr *PostUpdate) { // Allocate space for 5 lists (Vars, Inits, Updates, Finals), 2 expressions // (Step and CalcStep), list of used expression + step. 
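A minimal, hypothetical sketch of the llvm::TrailingObjects idiom these Create methods rely on may help here: the clause node and all of its expression lists live in one contiguous allocation, so Create has to reserve every trailing slot up front, exactly as the 5 * VL.size() + 2 + VL.size() + 1 computation below does. ToyVarListClause is an invented name, not a clang class.

#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/TrailingObjects.h"

namespace {
// A toy clause owning a single trailing list of variable references.
class ToyVarListClause final
    : private llvm::TrailingObjects<ToyVarListClause, clang::Expr *> {
  friend TrailingObjects;
  unsigned NumVars;
  explicit ToyVarListClause(unsigned NumVars) : NumVars(NumVars) {}

public:
  static ToyVarListClause *Create(const clang::ASTContext &C,
                                  llvm::ArrayRef<clang::Expr *> VL) {
    // Reserve every trailing Expr* slot in one allocation; additional lists
    // (privates, inits, steps, ...) would simply enlarge this size.
    void *Mem = C.Allocate(totalSizeToAlloc<clang::Expr *>(VL.size()),
                           alignof(ToyVarListClause));
    auto *Clause = new (Mem) ToyVarListClause(VL.size());
    llvm::copy(VL, Clause->getTrailingObjects<clang::Expr *>());
    return Clause;
  }

  llvm::ArrayRef<clang::Expr *> getVarRefs() const {
    return {getTrailingObjects<clang::Expr *>(), NumVars};
  }
};
} // namespace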
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size() + 2 + VL.size() + 1)); - OMPLinearClause *Clause = new (Mem) OMPLinearClause( - StartLoc, LParenLoc, Modifier, ModifierLoc, ColonLoc, EndLoc, VL.size()); + OMPLinearClause *Clause = + new (Mem) OMPLinearClause(StartLoc, LParenLoc, Modifier, ModifierLoc, + ColonLoc, StepModifierLoc, EndLoc, VL.size()); Clause->setVarRefs(VL); Clause->setPrivates(PL); Clause->setInits(IL); @@ -625,6 +647,13 @@ OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C, return new (Mem) OMPAlignedClause(NumVars); } +OMPAlignClause *OMPAlignClause::Create(const ASTContext &C, Expr *A, + SourceLocation StartLoc, + SourceLocation LParenLoc, + SourceLocation EndLoc) { + return new (C) OMPAlignClause(A, StartLoc, LParenLoc, EndLoc); +} + void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) { assert(SrcExprs.size() == varlist_size() && "Number of source expressions is " "not the same as the " @@ -1025,19 +1054,19 @@ OMPDepobjClause *OMPDepobjClause::CreateEmpty(const ASTContext &C) { OMPDependClause * OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, - Expr *DepModifier, OpenMPDependClauseKind DepKind, - SourceLocation DepLoc, SourceLocation ColonLoc, + DependDataTy Data, Expr *DepModifier, ArrayRef<Expr *> VL, unsigned NumLoops) { void *Mem = C.Allocate( totalSizeToAlloc<Expr *>(VL.size() + /*depend-modifier*/ 1 + NumLoops), alignof(OMPDependClause)); OMPDependClause *Clause = new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops); - Clause->setVarRefs(VL); - Clause->setDependencyKind(DepKind); - Clause->setDependencyLoc(DepLoc); - Clause->setColonLoc(ColonLoc); + Clause->setDependencyKind(Data.DepKind); + Clause->setDependencyLoc(Data.DepLoc); + Clause->setColonLoc(Data.ColonLoc); + Clause->setOmpAllMemoryLoc(Data.OmpAllMemoryLoc); Clause->setModifier(DepModifier); + Clause->setVarRefs(VL); for (unsigned I = 0 ; I < NumLoops; ++I) Clause->setLoopData(I, nullptr); return Clause; @@ -1112,7 +1141,7 @@ OMPMapClause *OMPMapClause::Create( const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, - ArrayRef<OpenMPMapModifierKind> MapModifiers, + Expr *IteratorModifier, ArrayRef<OpenMPMapModifierKind> MapModifiers, ArrayRef<SourceLocation> MapModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId, OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc) { @@ -1135,7 +1164,7 @@ OMPMapClause *OMPMapClause::Create( void *Mem = C.Allocate( totalSizeToAlloc<Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent>( - 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations, + 2 * Sizes.NumVars + 1, Sizes.NumUniqueDeclarations, Sizes.NumUniqueDeclarations + Sizes.NumComponentLists, Sizes.NumComponents)); OMPMapClause *Clause = new (Mem) @@ -1144,6 +1173,7 @@ OMPMapClause *OMPMapClause::Create( Clause->setVarRefs(Vars); Clause->setUDMapperRefs(UDMapperRefs); + Clause->setIteratorModifier(IteratorModifier); Clause->setClauseInfo(Declarations, ComponentLists); Clause->setMapType(Type); Clause->setMapLoc(TypeLoc); @@ -1156,10 +1186,12 @@ OMPMapClause::CreateEmpty(const ASTContext &C, void *Mem = C.Allocate( totalSizeToAlloc<Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent>( - 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations, + 2 * Sizes.NumVars + 1, 
Sizes.NumUniqueDeclarations, Sizes.NumUniqueDeclarations + Sizes.NumComponentLists, Sizes.NumComponents)); - return new (Mem) OMPMapClause(Sizes); + OMPMapClause *Clause = new (Mem) OMPMapClause(Sizes); + Clause->setIteratorModifier(nullptr); + return Clause; } OMPToClause *OMPToClause::Create( @@ -1419,6 +1451,53 @@ OMPIsDevicePtrClause::CreateEmpty(const ASTContext &C, return new (Mem) OMPIsDevicePtrClause(Sizes); } +OMPHasDeviceAddrClause * +OMPHasDeviceAddrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs, + ArrayRef<Expr *> Vars, + ArrayRef<ValueDecl *> Declarations, + MappableExprComponentListsRef ComponentLists) { + OMPMappableExprListSizeTy Sizes; + Sizes.NumVars = Vars.size(); + Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations); + Sizes.NumComponentLists = ComponentLists.size(); + Sizes.NumComponents = getComponentsTotalNumber(ComponentLists); + + // We need to allocate: + // NumVars x Expr* - we have an original list expression for each clause list + // entry. + // NumUniqueDeclarations x ValueDecl* - unique base declarations associated + // with each component list. + // (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the + // number of lists for each unique declaration and the size of each component + // list. + // NumComponents x MappableComponent - the total of all the components in all + // the lists. + void *Mem = C.Allocate( + totalSizeToAlloc<Expr *, ValueDecl *, unsigned, + OMPClauseMappableExprCommon::MappableComponent>( + Sizes.NumVars, Sizes.NumUniqueDeclarations, + Sizes.NumUniqueDeclarations + Sizes.NumComponentLists, + Sizes.NumComponents)); + + auto *Clause = new (Mem) OMPHasDeviceAddrClause(Locs, Sizes); + + Clause->setVarRefs(Vars); + Clause->setClauseInfo(Declarations, ComponentLists); + return Clause; +} + +OMPHasDeviceAddrClause * +OMPHasDeviceAddrClause::CreateEmpty(const ASTContext &C, + const OMPMappableExprListSizeTy &Sizes) { + void *Mem = C.Allocate( + totalSizeToAlloc<Expr *, ValueDecl *, unsigned, + OMPClauseMappableExprCommon::MappableComponent>( + Sizes.NumVars, Sizes.NumUniqueDeclarations, + Sizes.NumUniqueDeclarations + Sizes.NumComponentLists, + Sizes.NumComponents)); + return new (Mem) OMPHasDeviceAddrClause(Sizes); +} + OMPNontemporalClause *OMPNontemporalClause::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, @@ -1564,18 +1643,19 @@ OMPAffinityClause *OMPAffinityClause::CreateEmpty(const ASTContext &C, } OMPInitClause *OMPInitClause::Create(const ASTContext &C, Expr *InteropVar, - ArrayRef<Expr *> PrefExprs, bool IsTarget, - bool IsTargetSync, SourceLocation StartLoc, + OMPInteropInfo &InteropInfo, + SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc) { - void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(PrefExprs.size() + 1)); - auto *Clause = - new (Mem) OMPInitClause(IsTarget, IsTargetSync, StartLoc, LParenLoc, - VarLoc, EndLoc, PrefExprs.size() + 1); + void *Mem = + C.Allocate(totalSizeToAlloc<Expr *>(InteropInfo.PreferTypes.size() + 1)); + auto *Clause = new (Mem) OMPInitClause( + InteropInfo.IsTarget, InteropInfo.IsTargetSync, StartLoc, LParenLoc, + VarLoc, EndLoc, InteropInfo.PreferTypes.size() + 1); Clause->setInteropVar(InteropVar); - llvm::copy(PrefExprs, Clause->getTrailingObjects<Expr *>() + 1); + llvm::copy(InteropInfo.PreferTypes, Clause->getTrailingObjects<Expr *>() + 1); return Clause; } @@ -1584,6 +1664,62 @@ OMPInitClause *OMPInitClause::CreateEmpty(const ASTContext &C, unsigned N) { return 
new (Mem) OMPInitClause(N); } +OMPBindClause * +OMPBindClause::Create(const ASTContext &C, OpenMPBindClauseKind K, + SourceLocation KLoc, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation EndLoc) { + return new (C) OMPBindClause(K, KLoc, StartLoc, LParenLoc, EndLoc); +} + +OMPBindClause *OMPBindClause::CreateEmpty(const ASTContext &C) { + return new (C) OMPBindClause(); +} + +OMPDoacrossClause * +OMPDoacrossClause::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation EndLoc, + OpenMPDoacrossClauseModifier DepType, + SourceLocation DepLoc, SourceLocation ColonLoc, + ArrayRef<Expr *> VL, unsigned NumLoops) { + void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + NumLoops), + alignof(OMPDoacrossClause)); + OMPDoacrossClause *Clause = new (Mem) + OMPDoacrossClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops); + Clause->setDependenceType(DepType); + Clause->setDependenceLoc(DepLoc); + Clause->setColonLoc(ColonLoc); + Clause->setVarRefs(VL); + for (unsigned I = 0; I < NumLoops; ++I) + Clause->setLoopData(I, nullptr); + return Clause; +} + +OMPDoacrossClause *OMPDoacrossClause::CreateEmpty(const ASTContext &C, + unsigned N, + unsigned NumLoops) { + void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + NumLoops), + alignof(OMPDoacrossClause)); + return new (Mem) OMPDoacrossClause(N, NumLoops); +} + +void OMPDoacrossClause::setLoopData(unsigned NumLoop, Expr *Cnt) { + assert(NumLoop < NumLoops && "Loop index must be less number of loops."); + auto *It = std::next(getVarRefs().end(), NumLoop); + *It = Cnt; +} + +Expr *OMPDoacrossClause::getLoopData(unsigned NumLoop) { + assert(NumLoop < NumLoops && "Loop index must be less number of loops."); + auto *It = std::next(getVarRefs().end(), NumLoop); + return *It; +} + +const Expr *OMPDoacrossClause::getLoopData(unsigned NumLoop) const { + assert(NumLoop < NumLoops && "Loop index must be less number of loops."); + const auto *It = std::next(getVarRefs().end(), NumLoop); + return *It; +} + //===----------------------------------------------------------------------===// // OpenMP clauses printing methods //===----------------------------------------------------------------------===// @@ -1608,6 +1744,12 @@ void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) { OS << ")"; } +void OMPClausePrinter::VisitOMPAlignClause(OMPAlignClause *Node) { + OS << "align("; + Node->getAlignment()->printPretty(OS, nullptr, Policy, 0); + OS << ")"; +} + void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) { OS << "safelen("; Node->getSafelen()->printPretty(OS, nullptr, Policy, 0); @@ -1623,7 +1765,7 @@ void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) { void OMPClausePrinter::VisitOMPSizesClause(OMPSizesClause *Node) { OS << "sizes("; bool First = true; - for (auto Size : Node->getSizesRefs()) { + for (auto *Size : Node->getSizesRefs()) { if (!First) OS << ", "; Size->printPretty(OS, nullptr, Policy, 0); @@ -1702,6 +1844,22 @@ void OMPClausePrinter::VisitOMPAtomicDefaultMemOrderClause( << ")"; } +void OMPClausePrinter::VisitOMPAtClause(OMPAtClause *Node) { + OS << "at(" << getOpenMPSimpleClauseTypeName(OMPC_at, Node->getAtKind()) + << ")"; +} + +void OMPClausePrinter::VisitOMPSeverityClause(OMPSeverityClause *Node) { + OS << "severity(" + << getOpenMPSimpleClauseTypeName(OMPC_severity, Node->getSeverityKind()) + << ")"; +} + +void OMPClausePrinter::VisitOMPMessageClause(OMPMessageClause *Node) { + OS << "message(\"" + << 
cast<StringLiteral>(Node->getMessageString())->getString() << "\")"; +} + void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) { OS << "schedule("; if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) { @@ -1765,6 +1923,20 @@ void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) { OS << "capture"; } +void OMPClausePrinter::VisitOMPCompareClause(OMPCompareClause *) { + OS << "compare"; +} + +void OMPClausePrinter::VisitOMPFailClause(OMPFailClause *Node) { + OS << "fail"; + if (Node) { + OS << "("; + OS << getOpenMPSimpleClauseTypeName( + Node->getClauseKind(), static_cast<int>(Node->getFailParameter())); + OS << ")"; + } +} + void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) { OS << "seq_cst"; } @@ -1785,6 +1957,8 @@ void OMPClausePrinter::VisitOMPRelaxedClause(OMPRelaxedClause *) { OS << "relaxed"; } +void OMPClausePrinter::VisitOMPWeakClause(OMPWeakClause *) { OS << "weak"; } + void OMPClausePrinter::VisitOMPThreadsClause(OMPThreadsClause *) { OS << "threads"; } @@ -1822,12 +1996,22 @@ void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) { void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) { OS << "grainsize("; + OpenMPGrainsizeClauseModifier Modifier = Node->getModifier(); + if (Modifier != OMPC_GRAINSIZE_unknown) { + OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), Modifier) + << ": "; + } Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0); OS << ")"; } void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) { OS << "num_tasks("; + OpenMPNumTasksClauseModifier Modifier = Node->getModifier(); + if (Modifier != OMPC_NUMTASKS_unknown) { + OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), Modifier) + << ": "; + } Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0); OS << ")"; } @@ -2039,16 +2223,20 @@ void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) { void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) { if (!Node->varlist_empty()) { OS << "linear"; + VisitOMPClauseList(Node, '('); + if (Node->getModifierLoc().isValid() || Node->getStep() != nullptr) { + OS << ": "; + } if (Node->getModifierLoc().isValid()) { - OS << '(' - << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier()); + OS << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier()); } - VisitOMPClauseList(Node, '('); - if (Node->getModifierLoc().isValid()) - OS << ')'; if (Node->getStep() != nullptr) { - OS << ": "; + if (Node->getModifierLoc().isValid()) { + OS << ", "; + } + OS << "step("; Node->getStep()->printPretty(OS, nullptr, Policy, 0); + OS << ")"; } OS << ")"; } @@ -2101,11 +2289,23 @@ void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) { DepModifier->printPretty(OS, nullptr, Policy); OS << ", "; } - OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), - Node->getDependencyKind()); - if (!Node->varlist_empty()) { + OpenMPDependClauseKind DepKind = Node->getDependencyKind(); + OpenMPDependClauseKind PrintKind = DepKind; + bool IsOmpAllMemory = false; + if (PrintKind == OMPC_DEPEND_outallmemory) { + PrintKind = OMPC_DEPEND_out; + IsOmpAllMemory = true; + } else if (PrintKind == OMPC_DEPEND_inoutallmemory) { + PrintKind = OMPC_DEPEND_inout; + IsOmpAllMemory = true; + } + OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), PrintKind); + if (!Node->varlist_empty() || IsOmpAllMemory) OS << " :"; - VisitOMPClauseList(Node, ' '); + VisitOMPClauseList(Node, ' '); + if (IsOmpAllMemory) { + OS << 
(Node->varlist_empty() ? " " : ","); + OS << "omp_all_memory"; } OS << ")"; } @@ -2121,16 +2321,27 @@ static void PrintMapper(raw_ostream &OS, T *Node, OS << Node->getMapperIdInfo() << ')'; } +template <typename T> +static void PrintIterator(raw_ostream &OS, T *Node, + const PrintingPolicy &Policy) { + if (Expr *IteratorModifier = Node->getIteratorModifier()) + IteratorModifier->printPretty(OS, nullptr, Policy); +} + void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) { if (!Node->varlist_empty()) { OS << "map("; if (Node->getMapType() != OMPC_MAP_unknown) { for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) { if (Node->getMapTypeModifier(I) != OMPC_MAP_MODIFIER_unknown) { - OS << getOpenMPSimpleClauseTypeName(OMPC_map, - Node->getMapTypeModifier(I)); - if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_mapper) - PrintMapper(OS, Node, Policy); + if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_iterator) { + PrintIterator(OS, Node, Policy); + } else { + OS << getOpenMPSimpleClauseTypeName(OMPC_map, + Node->getMapTypeModifier(I)); + if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_mapper) + PrintMapper(OS, Node, Policy); + } OS << ','; } } @@ -2226,6 +2437,14 @@ void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) { } } +void OMPClausePrinter::VisitOMPHasDeviceAddrClause(OMPHasDeviceAddrClause *Node) { + if (!Node->varlist_empty()) { + OS << "has_device_addr"; + VisitOMPClauseList(Node, '('); + OS << ")"; + } +} + void OMPClausePrinter::VisitOMPNontemporalClause(OMPNontemporalClause *Node) { if (!Node->varlist_empty()) { OS << "nontemporal"; @@ -2235,8 +2454,12 @@ void OMPClausePrinter::VisitOMPNontemporalClause(OMPNontemporalClause *Node) { } void OMPClausePrinter::VisitOMPOrderClause(OMPOrderClause *Node) { - OS << "order(" << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getKind()) - << ")"; + OS << "order("; + if (Node->getModifier() != OMPC_ORDER_MODIFIER_unknown) { + OS << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getModifier()); + OS << ": "; + } + OS << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getKind()) << ")"; } void OMPClausePrinter::VisitOMPInclusiveClause(OMPInclusiveClause *Node) { @@ -2295,6 +2518,59 @@ void OMPClausePrinter::VisitOMPFilterClause(OMPFilterClause *Node) { OS << ")"; } +void OMPClausePrinter::VisitOMPBindClause(OMPBindClause *Node) { + OS << "bind(" + << getOpenMPSimpleClauseTypeName(OMPC_bind, unsigned(Node->getBindKind())) + << ")"; +} + +void OMPClausePrinter::VisitOMPXDynCGroupMemClause( + OMPXDynCGroupMemClause *Node) { + OS << "ompx_dyn_cgroup_mem("; + Node->getSize()->printPretty(OS, nullptr, Policy, 0); + OS << ")"; +} + +void OMPClausePrinter::VisitOMPDoacrossClause(OMPDoacrossClause *Node) { + OS << "doacross("; + OpenMPDoacrossClauseModifier DepType = Node->getDependenceType(); + + switch (DepType) { + case OMPC_DOACROSS_source: + OS << "source:"; + break; + case OMPC_DOACROSS_sink: + OS << "sink:"; + break; + case OMPC_DOACROSS_source_omp_cur_iteration: + OS << "source: omp_cur_iteration"; + break; + case OMPC_DOACROSS_sink_omp_cur_iteration: + OS << "sink: omp_cur_iteration - 1"; + break; + default: + llvm_unreachable("unknown docaross modifier"); + } + VisitOMPClauseList(Node, ' '); + OS << ")"; +} + +void OMPClausePrinter::VisitOMPXAttributeClause(OMPXAttributeClause *Node) { + OS << "ompx_attribute("; + bool IsFirst = true; + for (auto &Attr : Node->getAttrs()) { + if (!IsFirst) + OS << ", "; + Attr->printPretty(OS, Policy); + IsFirst = false; + } + OS << ")"; +} + +void 
OMPClausePrinter::VisitOMPXBareClause(OMPXBareClause *Node) { + OS << "ompx_bare"; +} + void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx, VariantMatchInfo &VMI) const { for (const OMPTraitSet &Set : Sets) { @@ -2309,18 +2585,17 @@ void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx, TraitProperty::user_condition_unknown && "Ill-formed user condition, expected unknown trait property!"); - if (Optional<APSInt> CondVal = + if (std::optional<APSInt> CondVal = Selector.ScoreOrCondition->getIntegerConstantExpr(ASTCtx)) - VMI.addTrait(CondVal->isNullValue() - ? TraitProperty::user_condition_false - : TraitProperty::user_condition_true, + VMI.addTrait(CondVal->isZero() ? TraitProperty::user_condition_false + : TraitProperty::user_condition_true, "<condition>"); else VMI.addTrait(TraitProperty::user_condition_false, "<condition>"); continue; } - Optional<llvm::APSInt> Score; + std::optional<llvm::APSInt> Score; llvm::APInt *ScorePtr = nullptr; if (Selector.ScoreOrCondition) { if ((Score = Selector.ScoreOrCondition->getIntegerConstantExpr(ASTCtx))) @@ -2342,8 +2617,6 @@ void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx, getOpenMPContextTraitPropertyForSelector( Selector.Kind) && "Ill-formed construct selector!"); - - VMI.ConstructTraits.push_back(Selector.Properties.front().Kind); } } } @@ -2424,7 +2697,7 @@ std::string OMPTraitInfo::getMangledName() const { Property.RawString); } } - return OS.str(); + return MangledName; } OMPTraitInfo::OMPTraitInfo(StringRef MangledName) { @@ -2474,14 +2747,18 @@ llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS, TargetOMPContext::TargetOMPContext( ASTContext &ASTCtx, std::function<void(StringRef)> &&DiagUnknownTrait, - const FunctionDecl *CurrentFunctionDecl) - : OMPContext(ASTCtx.getLangOpts().OpenMPIsDevice, + const FunctionDecl *CurrentFunctionDecl, + ArrayRef<llvm::omp::TraitProperty> ConstructTraits) + : OMPContext(ASTCtx.getLangOpts().OpenMPIsTargetDevice, ASTCtx.getTargetInfo().getTriple()), FeatureValidityCheck([&](StringRef FeatureName) { return ASTCtx.getTargetInfo().isValidFeatureName(FeatureName); }), DiagUnknownTrait(std::move(DiagUnknownTrait)) { ASTCtx.getFunctionFeatureMap(FeatureMap, CurrentFunctionDecl); + + for (llvm::omp::TraitProperty Property : ConstructTraits) + addTrait(Property); } bool TargetOMPContext::matchesISATrait(StringRef RawString) const { diff --git a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp index 2ff5c9d8aeb5..e97cb5e226f5 100644 --- a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp +++ b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp @@ -33,9 +33,11 @@ static void BuildParentMap(MapTy& M, Stmt* S, switch (S->getStmtClass()) { case Stmt::PseudoObjectExprClass: { - assert(OVMode == OV_Transparent && "Should not appear alongside OVEs"); PseudoObjectExpr *POE = cast<PseudoObjectExpr>(S); + if (OVMode == OV_Opaque && M[POE->getSyntacticForm()]) + break; + // If we are rebuilding the map, clear out any existing state. if (M[POE->getSyntacticForm()]) for (Stmt *SubStmt : S->children()) @@ -133,12 +135,13 @@ void ParentMap::setParent(const Stmt *S, const Stmt *Parent) { Stmt* ParentMap::getParent(Stmt* S) const { MapTy* M = (MapTy*) Impl; - MapTy::iterator I = M->find(S); - return I == M->end() ? 
nullptr : I->second; + return M->lookup(S); } Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const { - do { S = getParent(S); } while (S && isa<ParenExpr>(S)); + do { + S = getParent(S); + } while (isa_and_nonnull<ParenExpr>(S)); return S; } @@ -154,7 +157,8 @@ Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const { Stmt *ParentMap::getParentIgnoreParenImpCasts(Stmt *S) const { do { S = getParent(S); - } while (S && isa<Expr>(S) && cast<Expr>(S)->IgnoreParenImpCasts() != S); + } while (isa_and_nonnull<Expr>(S) && + cast<Expr>(S)->IgnoreParenImpCasts() != S); return S; } diff --git a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp index 4a3e0a99c8a6..9723c0cfa83b 100644 --- a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp +++ b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp @@ -61,7 +61,26 @@ class ParentMapContext::ParentMap { template <typename, typename...> friend struct ::MatchParents; /// Contains parents of a node. - using ParentVector = llvm::SmallVector<DynTypedNode, 2>; + class ParentVector { + public: + ParentVector() = default; + explicit ParentVector(size_t N, const DynTypedNode &Value) { + Items.reserve(N); + for (; N > 0; --N) + push_back(Value); + } + bool contains(const DynTypedNode &Value) { + return Seen.contains(Value); + } + void push_back(const DynTypedNode &Value) { + if (!Value.getMemoizationData() || Seen.insert(Value).second) + Items.push_back(Value); + } + llvm::ArrayRef<DynTypedNode> view() const { return Items; } + private: + llvm::SmallVector<DynTypedNode, 2> Items; + llvm::SmallDenseSet<DynTypedNode, 2> Seen; + }; /// Maps from a node to its parents. This is used for nodes that have /// pointer identity only, which are more common and we can save space by @@ -99,7 +118,7 @@ class ParentMapContext::ParentMap { return llvm::ArrayRef<DynTypedNode>(); } if (const auto *V = I->second.template dyn_cast<ParentVector *>()) { - return llvm::makeArrayRef(*V); + return V->view(); } return getSingleDynTypedNodeFromParentMap(I->second); } @@ -252,7 +271,7 @@ public: const auto *S = It->second.dyn_cast<const Stmt *>(); if (!S) { if (auto *Vec = It->second.dyn_cast<ParentVector *>()) - return llvm::makeArrayRef(*Vec); + return Vec->view(); return getSingleDynTypedNodeFromParentMap(It->second); } const auto *P = dyn_cast<Expr>(S); @@ -265,16 +284,6 @@ public: } }; -template <typename Tuple, std::size_t... Is> -auto tuple_pop_front_impl(const Tuple &tuple, std::index_sequence<Is...>) { - return std::make_tuple(std::get<1 + Is>(tuple)...); -} - -template <typename Tuple> auto tuple_pop_front(const Tuple &tuple) { - return tuple_pop_front_impl( - tuple, std::make_index_sequence<std::tuple_size<Tuple>::value - 1>()); -} - template <typename T, typename... U> struct MatchParents { static std::tuple<bool, DynTypedNodeList, const T *, const U *...> match(const DynTypedNodeList &NodeList, @@ -285,10 +294,11 @@ template <typename T, typename... U> struct MatchParents { if (NextParentList.size() == 1) { auto TailTuple = MatchParents<U...>::match(NextParentList, ParentMap); if (std::get<bool>(TailTuple)) { - return std::tuple_cat( - std::make_tuple(true, std::get<DynTypedNodeList>(TailTuple), - TypedNode), - tuple_pop_front(tuple_pop_front(TailTuple))); + return std::apply( + [TypedNode](bool, DynTypedNodeList NodeList, auto... 
TupleTail) { + return std::make_tuple(true, NodeList, TypedNode, TupleTail...); + }, + TailTuple); } } } @@ -330,6 +340,9 @@ template <> DynTypedNode createDynTypedNode(const NestedNameSpecifierLoc &Node) { return DynTypedNode::create(Node); } +template <> DynTypedNode createDynTypedNode(const ObjCProtocolLoc &Node) { + return DynTypedNode::create(Node); +} /// @} /// A \c RecursiveASTVisitor that builds a map from nodes to their @@ -389,21 +402,23 @@ private: auto *Vector = NodeOrVector.template get<ParentVector *>(); // Skip duplicates for types that have memoization data. // We must check that the type has memoization data before calling - // std::find() because DynTypedNode::operator== can't compare all + // llvm::is_contained() because DynTypedNode::operator== can't compare all // types. bool Found = ParentStack.back().getMemoizationData() && - std::find(Vector->begin(), Vector->end(), - ParentStack.back()) != Vector->end(); + llvm::is_contained(*Vector, ParentStack.back()); if (!Found) Vector->push_back(ParentStack.back()); } } + template <typename T> static bool isNull(T Node) { return !Node; } + static bool isNull(ObjCProtocolLoc Node) { return false; } + template <typename T, typename MapNodeTy, typename BaseTraverseFn, typename MapTy> bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse, MapTy *Parents) { - if (!Node) + if (isNull(Node)) return true; addParent(MapNode, Parents); ParentStack.push_back(createDynTypedNode(Node)); @@ -429,6 +444,17 @@ private: [&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); }, &Map.OtherParents); } + bool TraverseAttr(Attr *AttrNode) { + return TraverseNode( + AttrNode, AttrNode, [&] { return VisitorBase::TraverseAttr(AttrNode); }, + &Map.PointerParents); + } + bool TraverseObjCProtocolLoc(ObjCProtocolLoc ProtocolLocNode) { + return TraverseNode( + ProtocolLocNode, DynTypedNode::create(ProtocolLocNode), + [&] { return VisitorBase::TraverseObjCProtocolLoc(ProtocolLocNode); }, + &Map.OtherParents); + } // Using generic TraverseNode for Stmt would prevent data-recursion. bool dataTraverseStmtPre(Stmt *StmtNode) { diff --git a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp index 150dcbec5187..5fd120bc745b 100644 --- a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp +++ b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp @@ -140,19 +140,19 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H, // Set the privacy flag if the privacy annotation in the // comma-delimited segment is at least as strict as the privacy // annotations in previous comma-delimited segments. 
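A hedged illustration of the annotation strings this loop classifies; the braced annotations are os_log-style privacy markers, and the logging API that would consume them is an assumption, not part of this parser.

// Each comma-delimited item inside the braces is one segment; the strictest
// privacy classification seen so far wins.
const char *PrivacyExamples[] = {
    "%{public}s",             // PrivacyFlags -> IsPublic (only if nothing stricter yet)
    "%{private}d",            // PrivacyFlags -> IsPrivate
    "%{sensitive}s",          // PrivacyFlags -> IsSensitive (strictest)
    "%{private, mask.hash}s", // IsPrivate plus mask type "hash" (1 to 8 chars)
};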
- if (MatchedStr.startswith("mask")) { + if (MatchedStr.starts_with("mask")) { StringRef MaskType = MatchedStr.substr(sizeof("mask.") - 1); unsigned Size = MaskType.size(); if (Warn && (Size == 0 || Size > 8)) H.handleInvalidMaskType(MaskType); FS.setMaskType(MaskType); - } else if (MatchedStr.equals("sensitive")) + } else if (MatchedStr == "sensitive") PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsSensitive; else if (PrivacyFlags != - clang::analyze_os_log::OSLogBufferItem::IsSensitive && - MatchedStr.equals("private")) + clang::analyze_os_log::OSLogBufferItem::IsSensitive && + MatchedStr == "private") PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPrivate; - else if (PrivacyFlags == 0 && MatchedStr.equals("public")) + else if (PrivacyFlags == 0 && MatchedStr == "public") PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPublic; } else { size_t CommaOrBracePos = @@ -326,6 +326,14 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H, case 's': k = ConversionSpecifier::sArg; break; case 'u': k = ConversionSpecifier::uArg; break; case 'x': k = ConversionSpecifier::xArg; break; + // C23. + case 'b': + if (isFreeBSDKPrintf) + k = ConversionSpecifier::FreeBSDbArg; // int followed by char * + else + k = ConversionSpecifier::bArg; + break; + case 'B': k = ConversionSpecifier::BArg; break; // POSIX specific. case 'C': k = ConversionSpecifier::CArg; break; case 'S': k = ConversionSpecifier::SArg; break; @@ -337,14 +345,11 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H, case '@': k = ConversionSpecifier::ObjCObjArg; break; // Glibc specific. case 'm': k = ConversionSpecifier::PrintErrno; break; - // FreeBSD kernel specific. - case 'b': - if (isFreeBSDKPrintf) - k = ConversionSpecifier::FreeBSDbArg; // int followed by char * - break; case 'r': if (isFreeBSDKPrintf) k = ConversionSpecifier::FreeBSDrArg; // int + else if (LO.FixedPoint) + k = ConversionSpecifier::rArg; break; case 'y': if (isFreeBSDKPrintf) @@ -370,6 +375,20 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H, if (Target.getTriple().isOSMSVCRT()) k = ConversionSpecifier::ZArg; break; + // ISO/IEC TR 18037 (fixed-point) specific. + // NOTE: 'r' is handled up above since FreeBSD also supports %r. + case 'k': + if (LO.FixedPoint) + k = ConversionSpecifier::kArg; + break; + case 'K': + if (LO.FixedPoint) + k = ConversionSpecifier::KArg; + break; + case 'R': + if (LO.FixedPoint) + k = ConversionSpecifier::RArg; + break; } // Check to see if we used the Objective-C modifier flags with @@ -428,7 +447,7 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H, continue; // We have a format specifier. Pass it to the callback. 
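For reference, a small sketch of the C23 binary conversion the parser now accepts; the printed output assumes a C23-conforming C library at run time, while the checker above only validates the format string.

#include <cstdio>

int main() {
  unsigned V = 10;
  std::printf("%b\n", V);  // "1010": C23 binary conversion of an unsigned int
  std::printf("%#b\n", V); // "0b1010": alternative form is valid for %b/%B
  return 0;
}

When the format is checked as FreeBSD kernel printf (isFreeBSDKPrintf above), the same letter is instead the kernel's %b, which takes an int followed by a char *.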
if (!H.HandlePrintfSpecifier(FSR.getValue(), FSR.getStart(), - I - FSR.getStart())) + I - FSR.getStart(), Target)) return true; } assert(I == E && "Format string not exhausted"); @@ -497,7 +516,7 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx, case LengthModifier::AsShort: if (Ctx.getTargetInfo().getTriple().isOSMSVCRT()) return Ctx.IntTy; - LLVM_FALLTHROUGH; + [[fallthrough]]; default: return ArgType::Invalid(); } @@ -624,6 +643,9 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx, } } + if (CS.isFixedPointArg() && !Ctx.getLangOpts().FixedPoint) + return ArgType::Invalid(); + switch (CS.getKind()) { case ConversionSpecifier::sArg: if (LM.getKind() == LengthModifier::AsWideChar) { @@ -655,6 +677,50 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx, return ArgType::CPointerTy; case ConversionSpecifier::ObjCObjArg: return ArgType::ObjCPointerTy; + case ConversionSpecifier::kArg: + switch (LM.getKind()) { + case LengthModifier::None: + return Ctx.AccumTy; + case LengthModifier::AsShort: + return Ctx.ShortAccumTy; + case LengthModifier::AsLong: + return Ctx.LongAccumTy; + default: + return ArgType::Invalid(); + } + case ConversionSpecifier::KArg: + switch (LM.getKind()) { + case LengthModifier::None: + return Ctx.UnsignedAccumTy; + case LengthModifier::AsShort: + return Ctx.UnsignedShortAccumTy; + case LengthModifier::AsLong: + return Ctx.UnsignedLongAccumTy; + default: + return ArgType::Invalid(); + } + case ConversionSpecifier::rArg: + switch (LM.getKind()) { + case LengthModifier::None: + return Ctx.FractTy; + case LengthModifier::AsShort: + return Ctx.ShortFractTy; + case LengthModifier::AsLong: + return Ctx.LongFractTy; + default: + return ArgType::Invalid(); + } + case ConversionSpecifier::RArg: + switch (LM.getKind()) { + case LengthModifier::None: + return Ctx.UnsignedFractTy; + case LengthModifier::AsShort: + return Ctx.UnsignedShortFractTy; + case LengthModifier::AsLong: + return Ctx.UnsignedLongFractTy; + default: + return ArgType::Invalid(); + } default: break; } @@ -711,8 +777,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt, CS.setKind(ConversionSpecifier::sArg); // Disable irrelevant flags - HasAlternativeForm = 0; - HasLeadingZeroes = 0; + HasAlternativeForm = false; + HasLeadingZeroes = false; // Set the long length modifier for wide characters if (QT->getPointeeType()->isWideCharType()) @@ -755,6 +821,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt, case BuiltinType::BFloat16: case BuiltinType::Float16: case BuiltinType::Float128: + case BuiltinType::Ibm128: case BuiltinType::ShortAccum: case BuiltinType::Accum: case BuiltinType::LongAccum: @@ -796,6 +863,10 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt, #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" #define SIGNED_TYPE(Id, SingletonId) #define UNSIGNED_TYPE(Id, SingletonId) #define FLOATING_TYPE(Id, SingletonId) @@ -843,7 +914,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt, } // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99. 
- if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus11)) + if (LangOpt.C99 || LangOpt.CPlusPlus11) namedTypeToLengthModifier(QT, LM); // If fixing the length modifier was enough, we might be done. @@ -873,26 +944,24 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt, // Set conversion specifier and disable any flags which do not apply to it. // Let typedefs to char fall through to int, as %c is silly for uint8_t. - if (!isa<TypedefType>(QT) && QT->isCharType()) { + if (!QT->getAs<TypedefType>() && QT->isCharType()) { CS.setKind(ConversionSpecifier::cArg); LM.setKind(LengthModifier::None); Precision.setHowSpecified(OptionalAmount::NotSpecified); - HasAlternativeForm = 0; - HasLeadingZeroes = 0; - HasPlusPrefix = 0; + HasAlternativeForm = false; + HasLeadingZeroes = false; + HasPlusPrefix = false; } // Test for Floating type first as LongDouble can pass isUnsignedIntegerType else if (QT->isRealFloatingType()) { CS.setKind(ConversionSpecifier::fArg); - } - else if (QT->isSignedIntegerType()) { + } else if (QT->isSignedIntegerType()) { CS.setKind(ConversionSpecifier::dArg); - HasAlternativeForm = 0; - } - else if (QT->isUnsignedIntegerType()) { + HasAlternativeForm = false; + } else if (QT->isUnsignedIntegerType()) { CS.setKind(ConversionSpecifier::uArg); - HasAlternativeForm = 0; - HasPlusPrefix = 0; + HasAlternativeForm = false; + HasPlusPrefix = false; } else { llvm_unreachable("Unexpected type"); } @@ -951,6 +1020,8 @@ bool PrintfSpecifier::hasValidPlusPrefix() const { case ConversionSpecifier::AArg: case ConversionSpecifier::FreeBSDrArg: case ConversionSpecifier::FreeBSDyArg: + case ConversionSpecifier::rArg: + case ConversionSpecifier::kArg: return true; default: @@ -962,8 +1033,10 @@ bool PrintfSpecifier::hasValidAlternativeForm() const { if (!HasAlternativeForm) return true; - // Alternate form flag only valid with the oxXaAeEfFgG conversions + // Alternate form flag only valid with the bBoxXaAeEfFgGrRkK conversions switch (CS.getKind()) { + case ConversionSpecifier::bArg: + case ConversionSpecifier::BArg: case ConversionSpecifier::oArg: case ConversionSpecifier::OArg: case ConversionSpecifier::xArg: @@ -978,6 +1051,10 @@ bool PrintfSpecifier::hasValidAlternativeForm() const { case ConversionSpecifier::GArg: case ConversionSpecifier::FreeBSDrArg: case ConversionSpecifier::FreeBSDyArg: + case ConversionSpecifier::rArg: + case ConversionSpecifier::RArg: + case ConversionSpecifier::kArg: + case ConversionSpecifier::KArg: return true; default: @@ -989,8 +1066,10 @@ bool PrintfSpecifier::hasValidLeadingZeros() const { if (!HasLeadingZeroes) return true; - // Leading zeroes flag only valid with the diouxXaAeEfFgG conversions + // Leading zeroes flag only valid with the bBdiouxXaAeEfFgGrRkK conversions switch (CS.getKind()) { + case ConversionSpecifier::bArg: + case ConversionSpecifier::BArg: case ConversionSpecifier::dArg: case ConversionSpecifier::DArg: case ConversionSpecifier::iArg: @@ -1010,6 +1089,10 @@ bool PrintfSpecifier::hasValidLeadingZeros() const { case ConversionSpecifier::GArg: case ConversionSpecifier::FreeBSDrArg: case ConversionSpecifier::FreeBSDyArg: + case ConversionSpecifier::rArg: + case ConversionSpecifier::RArg: + case ConversionSpecifier::kArg: + case ConversionSpecifier::KArg: return true; default: @@ -1036,6 +1119,8 @@ bool PrintfSpecifier::hasValidSpacePrefix() const { case ConversionSpecifier::AArg: case ConversionSpecifier::FreeBSDrArg: case ConversionSpecifier::FreeBSDyArg: + case ConversionSpecifier::rArg: + case 
ConversionSpecifier::kArg: return true; default: @@ -1081,8 +1166,10 @@ bool PrintfSpecifier::hasValidPrecision() const { if (Precision.getHowSpecified() == OptionalAmount::NotSpecified) return true; - // Precision is only valid with the diouxXaAeEfFgGsP conversions + // Precision is only valid with the bBdiouxXaAeEfFgGsPrRkK conversions switch (CS.getKind()) { + case ConversionSpecifier::bArg: + case ConversionSpecifier::BArg: case ConversionSpecifier::dArg: case ConversionSpecifier::DArg: case ConversionSpecifier::iArg: @@ -1104,6 +1191,10 @@ bool PrintfSpecifier::hasValidPrecision() const { case ConversionSpecifier::FreeBSDrArg: case ConversionSpecifier::FreeBSDyArg: case ConversionSpecifier::PArg: + case ConversionSpecifier::rArg: + case ConversionSpecifier::RArg: + case ConversionSpecifier::kArg: + case ConversionSpecifier::KArg: return true; default: diff --git a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp index 9a1b418f5ac1..4e1243ef79e8 100644 --- a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp +++ b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp @@ -65,8 +65,9 @@ static bool getFullyQualifiedTemplateName(const ASTContext &Ctx, assert(ArgTDecl != nullptr); QualifiedTemplateName *QTName = TName.getAsQualifiedTemplateName(); - if (QTName && !QTName->hasTemplateKeyword()) { - NNS = QTName->getQualifier(); + if (QTName && + !QTName->hasTemplateKeyword() && + (NNS = QTName->getQualifier())) { NestedNameSpecifier *QNNS = getFullyQualifiedNestedNameSpecifier( Ctx, NNS, WithGlobalNsPrefix); if (QNNS != NNS) { @@ -80,8 +81,12 @@ static bool getFullyQualifiedTemplateName(const ASTContext &Ctx, Ctx, ArgTDecl, true, WithGlobalNsPrefix); } if (NNS) { - TName = Ctx.getQualifiedTemplateName(NNS, - /*TemplateKeyword=*/false, ArgTDecl); + TemplateName UnderlyingTN(ArgTDecl); + if (UsingShadowDecl *USD = TName.getAsUsingShadowDecl()) + UnderlyingTN = TemplateName(USD); + TName = + Ctx.getQualifiedTemplateName(NNS, + /*TemplateKeyword=*/false, UnderlyingTN); Changed = true; } return Changed; @@ -125,11 +130,9 @@ static const Type *getFullyQualifiedTemplateType(const ASTContext &Ctx, if (const auto *TST = dyn_cast<const TemplateSpecializationType>(TypePtr)) { bool MightHaveChanged = false; SmallVector<TemplateArgument, 4> FQArgs; - for (TemplateSpecializationType::iterator I = TST->begin(), E = TST->end(); - I != E; ++I) { - // Cheap to copy and potentially modified by - // getFullyQualifedTemplateArgument. - TemplateArgument Arg(*I); + // Cheap to copy and potentially modified by + // getFullyQualifedTemplateArgument. 
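As a usage sketch of the public entry point this file implements (its signature appears further down in this hunk): spellFullyQualified is an invented helper, and the TypeName namespace qualification is assumed from the header.

#include "clang/AST/QualTypeNames.h"
#include <string>

// Turns e.g. 'vector<X>' written inside namespace ns into
// '::std::vector<::ns::X>' when the global namespace prefix is requested.
static std::string spellFullyQualified(clang::QualType QT,
                                       const clang::ASTContext &Ctx) {
  clang::QualType FQT = clang::TypeName::getFullyQualifiedType(
      QT, Ctx, /*WithGlobalNsPrefix=*/true);
  return FQT.getAsString(Ctx.getPrintingPolicy());
}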
+ for (TemplateArgument Arg : TST->template_arguments()) { MightHaveChanged |= getFullyQualifiedTemplateArgument( Ctx, Arg, WithGlobalNsPrefix); FQArgs.push_back(Arg); @@ -267,8 +270,8 @@ static NestedNameSpecifier *createNestedNameSpecifierForScopeOf( assert(Decl); const DeclContext *DC = Decl->getDeclContext()->getRedeclContext(); - const auto *Outer = dyn_cast_or_null<NamedDecl>(DC); - const auto *OuterNS = dyn_cast_or_null<NamespaceDecl>(DC); + const auto *Outer = dyn_cast<NamedDecl>(DC); + const auto *OuterNS = dyn_cast<NamespaceDecl>(DC); if (Outer && !(OuterNS && OuterNS->isAnonymousNamespace())) { if (const auto *CxxDecl = dyn_cast<CXXRecordDecl>(DC)) { if (ClassTemplateDecl *ClassTempl = @@ -296,7 +299,7 @@ static NestedNameSpecifier *createNestedNameSpecifierForScopeOf( } else if (const auto *TD = dyn_cast<TagDecl>(Outer)) { return createNestedNameSpecifier( Ctx, TD, FullyQualified, WithGlobalNsPrefix); - } else if (dyn_cast<TranslationUnitDecl>(Outer)) { + } else if (isa<TranslationUnitDecl>(Outer)) { // Context is the TU. Nothing needs to be done. return nullptr; } else { @@ -438,12 +441,20 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx, // elaborated type. Qualifiers PrefixQualifiers = QT.getLocalQualifiers(); QT = QualType(QT.getTypePtr(), 0); - ElaboratedTypeKeyword Keyword = ETK_None; + ElaboratedTypeKeyword Keyword = ElaboratedTypeKeyword::None; if (const auto *ETypeInput = dyn_cast<ElaboratedType>(QT.getTypePtr())) { QT = ETypeInput->getNamedType(); assert(!QT.hasLocalQualifiers()); Keyword = ETypeInput->getKeyword(); } + + // We don't consider the alias introduced by `using a::X` as a new type. + // The qualified name is still a::X. + if (const auto *UT = QT->getAs<UsingType>()) { + QT = Ctx.getQualifiedType(UT->getUnderlyingType(), PrefixQualifiers); + return getFullyQualifiedType(QT, Ctx, WithGlobalNsPrefix); + } + // Create a nested name specifier if needed. Prefix = createNestedNameSpecifierForScopeOf(Ctx, QT.getTypePtr(), true /*FullyQualified*/, @@ -461,7 +472,7 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx, Ctx, QT.getTypePtr(), WithGlobalNsPrefix); QT = QualType(TypePtr, 0); } - if (Prefix || Keyword != ETK_None) { + if (Prefix || Keyword != ElaboratedTypeKeyword::None) { QT = Ctx.getElaboratedType(Keyword, Prefix, QT); } QT = Ctx.getQualifiedType(QT, PrefixQualifiers); diff --git a/contrib/llvm-project/clang/lib/AST/Randstruct.cpp b/contrib/llvm-project/clang/lib/AST/Randstruct.cpp new file mode 100644 index 000000000000..99c665f420e6 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/Randstruct.cpp @@ -0,0 +1,231 @@ +//===--- Randstruct.cpp ---------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation for Clang's structure field layout +// randomization. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/AST/Randstruct.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/ASTDiagnostic.h" +#include "clang/AST/Attr.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" // For StaticAssertDecl +#include "clang/Basic/Diagnostic.h" +#include "llvm/ADT/SmallVector.h" + +#include <algorithm> +#include <random> +#include <set> +#include <sstream> +#include <string> + +using clang::ASTContext; +using clang::FieldDecl; +using llvm::SmallVector; + +namespace { + +// FIXME: Replace this with some discovery once that mechanism exists. +enum { CACHE_LINE = 64 }; + +// The Bucket class holds the struct fields we're trying to fill to a +// cache-line. +class Bucket { + SmallVector<FieldDecl *, 64> Fields; + int Size = 0; + +public: + virtual ~Bucket() = default; + + SmallVector<FieldDecl *, 64> &fields() { return Fields; } + void addField(FieldDecl *Field, int FieldSize); + virtual bool canFit(int FieldSize) const { + return Size + FieldSize <= CACHE_LINE; + } + virtual bool isBitfieldRun() const { return false; } + bool full() const { return Size >= CACHE_LINE; } +}; + +void Bucket::addField(FieldDecl *Field, int FieldSize) { + Size += FieldSize; + Fields.push_back(Field); +} + +struct BitfieldRunBucket : public Bucket { + bool canFit(int FieldSize) const override { return true; } + bool isBitfieldRun() const override { return true; } +}; + +void randomizeStructureLayoutImpl(const ASTContext &Context, + llvm::SmallVectorImpl<FieldDecl *> &FieldsOut, + std::mt19937 &RNG) { + // All of the Buckets produced by best-effort cache-line algorithm. + SmallVector<std::unique_ptr<Bucket>, 16> Buckets; + + // The current bucket of fields that we are trying to fill to a cache-line. + std::unique_ptr<Bucket> CurrentBucket; + + // The current bucket containing the run of adjacent bitfields to ensure they + // remain adjacent. + std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun; + + // Tracks the number of fields that we failed to fit to the current bucket, + // and thus still need to be added later. + size_t Skipped = 0; + + while (!FieldsOut.empty()) { + // If we've Skipped more fields than we have remaining to place, that means + // that they can't fit in our current bucket, and we need to start a new + // one. + if (Skipped >= FieldsOut.size()) { + Skipped = 0; + Buckets.push_back(std::move(CurrentBucket)); + } + + // Take the first field that needs to be put in a bucket. + auto FieldIter = FieldsOut.begin(); + FieldDecl *FD = *FieldIter; + + if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) { + // Start a bitfield run if this is the first bitfield we have found. + if (!CurrentBitfieldRun) + CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>(); + + // We've placed the field, and can remove it from the "awaiting Buckets" + // vector called "Fields." + CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1); + FieldsOut.erase(FieldIter); + continue; + } + + // Else, current field is not a bitfield. If we were previously in a + // bitfield run, end it. + if (CurrentBitfieldRun) + Buckets.push_back(std::move(CurrentBitfieldRun)); + + // If we don't have a bucket, make one. 
+ if (!CurrentBucket) + CurrentBucket = std::make_unique<Bucket>(); + + uint64_t Width = Context.getTypeInfo(FD->getType()).Width; + if (Width >= CACHE_LINE) { + std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>(); + OverSized->addField(FD, Width); + FieldsOut.erase(FieldIter); + Buckets.push_back(std::move(OverSized)); + continue; + } + + // If it fits, add it. + if (CurrentBucket->canFit(Width)) { + CurrentBucket->addField(FD, Width); + FieldsOut.erase(FieldIter); + + // If it's now full, tie off the bucket. + if (CurrentBucket->full()) { + Skipped = 0; + Buckets.push_back(std::move(CurrentBucket)); + } + } else { + // We can't fit it in our current bucket. Move to the end for processing + // later. + ++Skipped; // Mark it skipped. + FieldsOut.push_back(FD); + FieldsOut.erase(FieldIter); + } + } + + // Done processing the fields awaiting a bucket. + + // If we were filling a bucket, tie it off. + if (CurrentBucket) + Buckets.push_back(std::move(CurrentBucket)); + + // If we were processing a bitfield run bucket, tie it off. + if (CurrentBitfieldRun) + Buckets.push_back(std::move(CurrentBitfieldRun)); + + std::shuffle(std::begin(Buckets), std::end(Buckets), RNG); + + // Produce the new ordering of the elements from the Buckets. + SmallVector<FieldDecl *, 16> FinalOrder; + for (const std::unique_ptr<Bucket> &B : Buckets) { + llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields(); + if (!B->isBitfieldRun()) + std::shuffle(std::begin(RandFields), std::end(RandFields), RNG); + + FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end()); + } + + FieldsOut = FinalOrder; +} + +} // anonymous namespace + +namespace clang { +namespace randstruct { + +bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD, + SmallVectorImpl<Decl *> &FinalOrdering) { + SmallVector<FieldDecl *, 64> RandomizedFields; + SmallVector<Decl *, 8> PostRandomizedFields; + + unsigned TotalNumFields = 0; + for (Decl *D : RD->decls()) { + ++TotalNumFields; + if (auto *FD = dyn_cast<FieldDecl>(D)) + RandomizedFields.push_back(FD); + else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D)) + PostRandomizedFields.push_back(D); + else + FinalOrdering.push_back(D); + } + + if (RandomizedFields.empty()) + return false; + + // Struct might end with a flexible array or an array of size 0 or 1, + // in which case we don't want to randomize it. + FieldDecl *FlexibleArray = + RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr; + if (!FlexibleArray) { + if (const auto *CA = + dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType())) + if (CA->getSize().sle(2)) + FlexibleArray = RandomizedFields.pop_back_val(); + } + + std::string Seed = + Context.getLangOpts().RandstructSeed + RD->getNameAsString(); + std::seed_seq SeedSeq(Seed.begin(), Seed.end()); + std::mt19937 RNG(SeedSeq); + + randomizeStructureLayoutImpl(Context, RandomizedFields, RNG); + + // Plorp the randomized decls into the final ordering. + FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(), + RandomizedFields.end()); + + // Add fields that belong towards the end of the RecordDecl. + FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(), + PostRandomizedFields.end()); + + // Add back the flexible array. 
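Stepping back from the implementation, a hedged sketch of the user-facing feature this new file provides (the struct and field names are invented; the attribute and the -frandomize-layout-seed= driver flag are the real spellings): adjacent bit-fields are kept together as one run, a trailing flexible array (or a size 0/1 trailing array) stays last, and the remaining members are reordered according to the seed.

// clang -frandomize-layout-seed=<seed> ...
struct __attribute__((randomize_layout)) Packet {
  unsigned Kind : 4;  // adjacent bit-fields travel as one run
  unsigned Flags : 4;
  void *Owner;        // ordinary members may be shuffled freely
  char Tag;
  int Payload[];      // a flexible array member is never moved from the end
};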
+ if (FlexibleArray) + FinalOrdering.push_back(FlexibleArray); + + assert(TotalNumFields == FinalOrdering.size() && + "Decl count has been altered after Randstruct randomization!"); + (void)TotalNumFields; + return true; +} + +} // end namespace randstruct +} // end namespace clang diff --git a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp index a8d15036cab9..dffa007b6588 100644 --- a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp +++ b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp @@ -16,6 +16,7 @@ #include "clang/AST/CommentSema.h" #include "clang/Basic/CharInfo.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/Support/Allocator.h" using namespace clang; @@ -140,8 +141,8 @@ RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR, Kind = K.first; IsTrailingComment |= K.second; - IsAlmostTrailingComment = RawText.startswith("//<") || - RawText.startswith("/*<"); + IsAlmostTrailingComment = + RawText.starts_with("//<") || RawText.starts_with("/*<"); } else { Kind = RCK_Merged; IsTrailingComment = @@ -362,6 +363,24 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr, if (CommentText.empty()) return ""; + std::string Result; + for (const RawComment::CommentLine &Line : + getFormattedLines(SourceMgr, Diags)) + Result += Line.Text + "\n"; + + auto LastChar = Result.find_last_not_of('\n'); + Result.erase(LastChar + 1, Result.size()); + + return Result; +} + +std::vector<RawComment::CommentLine> +RawComment::getFormattedLines(const SourceManager &SourceMgr, + DiagnosticsEngine &Diags) const { + llvm::StringRef CommentText = getRawText(SourceMgr); + if (CommentText.empty()) + return {}; + llvm::BumpPtrAllocator Allocator; // We do not parse any commands, so CommentOptions are ignored by // comments::Lexer. Therefore, we just use default-constructed options. @@ -371,13 +390,23 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr, CommentText.begin(), CommentText.end(), /*ParseCommands=*/false); - std::string Result; + std::vector<RawComment::CommentLine> Result; // A column number of the first non-whitespace token in the comment text. // We skip whitespace up to this column, but keep the whitespace after this // column. IndentColumn is calculated when lexing the first line and reused // for the rest of lines. unsigned IndentColumn = 0; + // Record the line number of the last processed comment line. + // For block-style comments, an extra newline token will be produced after + // the end-comment marker, e.g.: + // /** This is a multi-line comment block. + // The lexer will produce two newline tokens here > */ + // previousLine will record the line number when we previously saw a newline + // token and recorded a comment line. If we see another newline token on the + // same line, don't record anything in between. + unsigned PreviousLine = 0; + // Processes one line of the comment and adds it to the result. // Handles skipping the indent at the start of the line. // Returns false when eof is reached and true otherwise. 
@@ -389,9 +418,14 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr, if (Tok.is(comments::tok::eof)) return false; if (Tok.is(comments::tok::newline)) { - Result += "\n"; + PresumedLoc Loc = SourceMgr.getPresumedLoc(Tok.getLocation()); + if (Loc.getLine() != PreviousLine) { + Result.emplace_back("", Loc, Loc); + PreviousLine = Loc.getLine(); + } return true; } + SmallString<124> Line; llvm::StringRef TokText = L.getSpelling(Tok, SourceMgr); bool LocInvalid = false; unsigned TokColumn = @@ -417,32 +451,35 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr, WhitespaceLen, std::max<int>(static_cast<int>(IndentColumn) - TokColumn, 0)); llvm::StringRef Trimmed = TokText.drop_front(SkipLen); - Result += Trimmed; + Line += Trimmed; + // Get the beginning location of the adjusted comment line. + PresumedLoc Begin = + SourceMgr.getPresumedLoc(Tok.getLocation().getLocWithOffset(SkipLen)); + // Lex all tokens in the rest of the line. for (L.lex(Tok); Tok.isNot(comments::tok::eof); L.lex(Tok)) { if (Tok.is(comments::tok::newline)) { - Result += "\n"; + // Get the ending location of the comment line. + PresumedLoc End = SourceMgr.getPresumedLoc(Tok.getLocation()); + if (End.getLine() != PreviousLine) { + Result.emplace_back(Line, Begin, End); + PreviousLine = End.getLine(); + } return true; } - Result += L.getSpelling(Tok, SourceMgr); + Line += L.getSpelling(Tok, SourceMgr); } + PresumedLoc End = SourceMgr.getPresumedLoc(Tok.getLocation()); + Result.emplace_back(Line, Begin, End); // We've reached the end of file token. return false; }; - auto DropTrailingNewLines = [](std::string &Str) { - while (!Str.empty() && Str.back() == '\n') - Str.pop_back(); - }; - // Process first line separately to remember indent for the following lines. - if (!LexLine(/*IsFirstLine=*/true)) { - DropTrailingNewLines(Result); + if (!LexLine(/*IsFirstLine=*/true)) return Result; - } // Process the rest of the lines. while (LexLine(/*IsFirstLine=*/false)) ; - DropTrailingNewLines(Result); return Result; } diff --git a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp index 972690becf9e..d9bf62c2bbb0 100644 --- a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp @@ -58,13 +58,13 @@ struct BaseSubobjectInfo { /// as DWARF, lacks all the information that was available at compile time, such /// as alignment attributes on fields and pragmas in effect. struct ExternalLayout { - ExternalLayout() : Size(0), Align(0) {} + ExternalLayout() = default; /// Overall record size in bits. - uint64_t Size; + uint64_t Size = 0; /// Overall record alignment in bits. - uint64_t Align; + uint64_t Align = 0; /// Record field offsets in bits. llvm::DenseMap<const FieldDecl *, uint64_t> FieldOffsets; @@ -240,7 +240,7 @@ EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD, return true; const ClassVectorTy &Classes = I->second; - if (llvm::find(Classes, RD) == Classes.end()) + if (!llvm::is_contained(Classes, RD)) return true; // There is already an empty class of the same type at this offset. @@ -602,21 +602,28 @@ protected: /// Whether the external AST source has provided a layout for this /// record. + LLVM_PREFERRED_TYPE(bool) unsigned UseExternalLayout : 1; /// Whether we need to infer alignment, even when we have an /// externally-provided layout. 
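LLVM_PREFERRED_TYPE, which this hunk adds in front of these bit-fields, is an annotation macro from llvm/Support/Compiler.h; a minimal sketch of its effect follows (ToyFlags is an invented name).

#include "llvm/Support/Compiler.h"

struct ToyFlags {
  // Expands to [[clang::preferred_type(bool)]] where supported and to nothing
  // otherwise: the member is still an unsigned bit-field, but debuggers and
  // diagnostics can treat it as a bool.
  LLVM_PREFERRED_TYPE(bool)
  unsigned IsPacked : 1;
};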
+ LLVM_PREFERRED_TYPE(bool) unsigned InferAlignment : 1; /// Packed - Whether the record is packed or not. + LLVM_PREFERRED_TYPE(bool) unsigned Packed : 1; + LLVM_PREFERRED_TYPE(bool) unsigned IsUnion : 1; + LLVM_PREFERRED_TYPE(bool) unsigned IsMac68kAlign : 1; + LLVM_PREFERRED_TYPE(bool) unsigned IsNaturalAlign : 1; + LLVM_PREFERRED_TYPE(bool) unsigned IsMsStruct : 1; /// UnfilledBitsInLastUnit - If the last field laid out was a bitfield, @@ -1059,10 +1066,10 @@ void ItaniumRecordLayoutBuilder::LayoutNonVirtualBases( // primary base, add it in now. } else if (RD->isDynamicClass()) { assert(DataSize == 0 && "Vtable pointer must be at offset zero!"); - CharUnits PtrWidth = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); - CharUnits PtrAlign = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0)); + CharUnits PtrWidth = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerWidth(LangAS::Default)); + CharUnits PtrAlign = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerAlign(LangAS::Default)); EnsureVTablePointerAlignment(PtrAlign); HasOwnVFPtr = true; @@ -1223,7 +1230,7 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) { // Per GCC's documentation, it only applies to non-static data members. return (Packed && ((Context.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver6) || - Context.getTargetInfo().getTriple().isPS4() || + Context.getTargetInfo().getTriple().isPS() || Context.getTargetInfo().getTriple().isOSAIX())) ? CharUnits::One() : UnpackedAlign; @@ -1261,7 +1268,9 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) { (!HasExternalLayout || Offset == CharUnits::Zero()) && EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) { setSize(std::max(getSize(), Layout.getSize())); - UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign); + // On PS4/PS5, don't update the alignment, to preserve compatibility. + if (!Context.getTargetInfo().getTriple().isPS()) + UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign); return CharUnits::Zero(); } @@ -1538,7 +1547,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { TypeInfo FieldInfo = Context.getTypeInfo(D->getType()); uint64_t StorageUnitSize = FieldInfo.Width; unsigned FieldAlign = FieldInfo.Align; - bool AlignIsRequired = FieldInfo.AlignIsRequired; + bool AlignIsRequired = FieldInfo.isAlignRequired(); // UnfilledBitsInLastUnit is the difference between the end of the // last allocated bitfield (i.e. the first bit offset available for @@ -1775,11 +1784,18 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { !D->getIdentifier()) FieldAlign = UnpackedFieldAlign = 1; - // On AIX, zero-width bitfields pad out to the alignment boundary, but then - // do not affect overall record alignment if there is a pragma pack or - // pragma align(packed). - if (isAIXLayout(Context) && !MaxFieldAlignment.isZero() && !FieldSize) - FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits); + // On AIX, zero-width bitfields pad out to the natural alignment boundary, + // but do not increase the alignment greater than the MaxFieldAlignment, or 1 + // if packed. 
+ if (isAIXLayout(Context) && !FieldSize) { + if (FieldPacked) + FieldAlign = 1; + if (!MaxFieldAlignment.isZero()) { + UnpackedFieldAlign = + std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits); + FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits); + } + } // Diagnose differences in layout due to padding or packing. if (!UseExternalLayout) @@ -1844,9 +1860,8 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, bool InsertExtraPadding) { auto *FieldClass = D->getType()->getAsCXXRecordDecl(); - bool PotentiallyOverlapping = D->hasAttr<NoUniqueAddressAttr>() && FieldClass; bool IsOverlappingEmptyField = - PotentiallyOverlapping && FieldClass->isEmpty(); + D->isPotentiallyOverlapping() && FieldClass->isEmpty(); CharUnits FieldOffset = (IsUnion || IsOverlappingEmptyField) ? CharUnits::Zero() : getDataSize(); @@ -1880,9 +1895,9 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, UnfilledBitsInLastUnit = 0; LastBitfieldStorageUnitSize = 0; - bool FieldPacked = Packed || D->hasAttr<PackedAttr>(); + llvm::Triple Target = Context.getTargetInfo().getTriple(); - bool AlignIsRequired = false; + AlignRequirementKind AlignRequirement = AlignRequirementKind::None; CharUnits FieldSize; CharUnits FieldAlign; // The amount of this class's dsize occupied by the field. @@ -1897,23 +1912,17 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, // aligned appropriately for their element type. EffectiveFieldSize = FieldSize = IsIncompleteArrayType ? CharUnits::Zero() : TI.Width; - AlignIsRequired = TI.AlignIsRequired; + AlignRequirement = TI.AlignRequirement; }; if (D->getType()->isIncompleteArrayType()) { setDeclInfo(true /* IsIncompleteArrayType */); - } else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) { - unsigned AS = Context.getTargetAddressSpace(RT->getPointeeType()); - EffectiveFieldSize = FieldSize = Context.toCharUnitsFromBits( - Context.getTargetInfo().getPointerWidth(AS)); - FieldAlign = Context.toCharUnitsFromBits( - Context.getTargetInfo().getPointerAlign(AS)); } else { setDeclInfo(false /* IsIncompleteArrayType */); // A potentially-overlapping field occupies its dsize or nvsize, whichever // is larger. - if (PotentiallyOverlapping) { + if (D->isPotentiallyOverlapping()) { const ASTRecordLayout &Layout = Context.getASTRecordLayout(FieldClass); EffectiveFieldSize = std::max(Layout.getNonVirtualSize(), Layout.getDataSize()); @@ -1947,7 +1956,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, // Since the combination of -mms-bitfields together with structs // like max_align_t (which contains a long double) for mingw is - // quite comon (and GCC handles it silently), just handle it + // quite common (and GCC handles it silently), just handle it // silently there. For other targets that have ms_struct enabled // (most probably via a pragma or attribute), trigger a diagnostic // that defaults to an error. @@ -1961,6 +1970,27 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, } } + bool FieldPacked = (Packed && (!FieldClass || FieldClass->isPOD() || + FieldClass->hasAttr<PackedAttr>() || + Context.getLangOpts().getClangABICompat() <= + LangOptions::ClangABI::Ver15 || + Target.isPS() || Target.isOSDarwin() || + Target.isOSAIX())) || + D->hasAttr<PackedAttr>(); + + // When used as part of a typedef, or together with a 'packed' attribute, the + // 'aligned' attribute can be used to decrease alignment. 
In that case, it + // overrides any computed alignment we have, and there is no need to upgrade + // the alignment. + auto alignedAttrCanDecreaseAIXAlignment = [AlignRequirement, FieldPacked] { + // Enum alignment sources can be safely ignored here, because this only + // helps decide whether we need the AIX alignment upgrade, which only + // applies to floating-point types. + return AlignRequirement == AlignRequirementKind::RequiredByTypedef || + (AlignRequirement == AlignRequirementKind::RequiredByRecord && + FieldPacked); + }; + // The AIX `power` alignment rules apply the natural alignment of the // "first member" if it is of a floating-point data type (or is an aggregate // whose recursively "first" member or element is such a type). The alignment @@ -1971,7 +2001,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, // and zero-width bit-fields count as prior members; members of empty class // types marked `no_unique_address` are not considered to be prior members. CharUnits PreferredAlign = FieldAlign; - if (DefaultsToAIXPowerAlignment && !AlignIsRequired && + if (DefaultsToAIXPowerAlignment && !alignedAttrCanDecreaseAIXAlignment() && (FoundFirstNonOverlappingEmptyFieldForAIX || IsNaturalAlign)) { auto performBuiltinTypeAlignmentUpgrade = [&](const BuiltinType *BTy) { if (BTy->getKind() == BuiltinType::Double || @@ -1982,12 +2012,13 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, } }; - const Type *Ty = D->getType()->getBaseElementTypeUnsafe(); - if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { - performBuiltinTypeAlignmentUpgrade(CTy->getElementType()->castAs<BuiltinType>()); - } else if (const BuiltinType *BTy = Ty->getAs<BuiltinType>()) { + const Type *BaseTy = D->getType()->getBaseElementTypeUnsafe(); + if (const ComplexType *CTy = BaseTy->getAs<ComplexType>()) { + performBuiltinTypeAlignmentUpgrade( + CTy->getElementType()->castAs<BuiltinType>()); + } else if (const BuiltinType *BTy = BaseTy->getAs<BuiltinType>()) { performBuiltinTypeAlignmentUpgrade(BTy); - } else if (const RecordType *RT = Ty->getAs<RecordType>()) { + } else if (const RecordType *RT = BaseTy->getAs<RecordType>()) { const RecordDecl *RD = RT->getDecl(); assert(RD && "Expected non-null RecordDecl."); const ASTRecordLayout &FieldRecord = Context.getASTRecordLayout(RD); @@ -1997,27 +2028,34 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, // The align if the field is not packed. This is to check if the attribute // was unnecessary (-Wpacked). - CharUnits UnpackedFieldAlign = - !DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign; + CharUnits UnpackedFieldAlign = FieldAlign; + CharUnits PackedFieldAlign = CharUnits::One(); CharUnits UnpackedFieldOffset = FieldOffset; + CharUnits OriginalFieldAlign = UnpackedFieldAlign; - if (FieldPacked) { - FieldAlign = CharUnits::One(); - PreferredAlign = CharUnits::One(); - } CharUnits MaxAlignmentInChars = Context.toCharUnitsFromBits(D->getMaxAlignment()); - FieldAlign = std::max(FieldAlign, MaxAlignmentInChars); + PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars); PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars); UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars); // The maximum field alignment overrides the aligned attribute. 
if (!MaxFieldAlignment.isZero()) { - FieldAlign = std::min(FieldAlign, MaxFieldAlignment); + PackedFieldAlign = std::min(PackedFieldAlign, MaxFieldAlignment); PreferredAlign = std::min(PreferredAlign, MaxFieldAlignment); UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment); } + + if (!FieldPacked) + FieldAlign = UnpackedFieldAlign; + if (DefaultsToAIXPowerAlignment) + UnpackedFieldAlign = PreferredAlign; + if (FieldPacked) { + PreferredAlign = PackedFieldAlign; + FieldAlign = PackedFieldAlign; + } + CharUnits AlignTo = !DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign; // Round up the current record size to the field's alignment boundary. @@ -2084,6 +2122,25 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D, // Remember max struct/class ABI-specified alignment. UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign); UpdateAlignment(FieldAlign, UnpackedFieldAlign, PreferredAlign); + + // For checking the alignment of inner fields against + // the alignment of its parent record. + if (const RecordDecl *RD = D->getParent()) { + // Check if packed attribute or pragma pack is present. + if (RD->hasAttr<PackedAttr>() || !MaxFieldAlignment.isZero()) + if (FieldAlign < OriginalFieldAlign) + if (D->getType()->isRecordType()) { + // If the offset is a multiple of the alignment of + // the type, raise the warning. + // TODO: Takes no account the alignment of the outer struct + if (FieldOffset % OriginalFieldAlign != 0) + Diag(D->getLocation(), diag::warn_unaligned_access) + << Context.getTypeDeclType(RD) << D->getName() << D->getType(); + } + } + + if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign) + Diag(D->getLocation(), diag::warn_unpacked_field) << D; } void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) { @@ -2148,11 +2205,19 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) { << (InBits ? 1 : 0); // (byte|bit) } + const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD); + // Warn if we packed it unnecessarily, when the unpacked alignment is not // greater than the one after packing, the size in bits doesn't change and // the offset of each field is identical. + // Unless the type is non-POD (for Clang ABI > 15), where the packed + // attribute on such a type does allow the type to be packed into other + // structures that use the packed attribute. if (Packed && UnpackedAlignment <= Alignment && - UnpackedSizeInBits == getSizeInBits() && !HasPackedField) + UnpackedSizeInBits == getSizeInBits() && !HasPackedField && + (!CXXRD || CXXRD->isPOD() || + Context.getLangOpts().getClangABICompat() <= + LangOptions::ClangABI::Ver15)) Diag(D->getLocation(), diag::warn_unnecessary_packed) << Context.getTypeDeclType(RD); } @@ -2209,9 +2274,12 @@ ItaniumRecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field, /// \returns diagnostic %select index. static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) { switch (Tag) { - case TTK_Struct: return 0; - case TTK_Interface: return 1; - case TTK_Class: return 2; + case TagTypeKind::Struct: + return 0; + case TagTypeKind::Interface: + return 1; + case TagTypeKind::Class: + return 2; default: llvm_unreachable("Invalid tag kind for field padding diagnostic!"); } } @@ -2239,19 +2307,22 @@ void ItaniumRecordLayoutBuilder::CheckFieldPadding( PadSize = PadSize / CharBitNum; InBits = false; } - if (D->getIdentifier()) - Diag(D->getLocation(), diag::warn_padded_struct_field) + if (D->getIdentifier()) { + auto Diagnostic = D->isBitField() ? 
diag::warn_padded_struct_bitfield + : diag::warn_padded_struct_field; + Diag(D->getLocation(), Diagnostic) << getPaddingDiagFromTagKind(D->getParent()->getTagKind()) - << Context.getTypeDeclType(D->getParent()) - << PadSize + << Context.getTypeDeclType(D->getParent()) << PadSize << (InBits ? 1 : 0) // (byte|bit) << D->getIdentifier(); - else - Diag(D->getLocation(), diag::warn_padded_struct_anon_field) + } else { + auto Diagnostic = D->isBitField() ? diag::warn_padded_struct_anon_bitfield + : diag::warn_padded_struct_anon_field; + Diag(D->getLocation(), Diagnostic) << getPaddingDiagFromTagKind(D->getParent()->getTagKind()) - << Context.getTypeDeclType(D->getParent()) - << PadSize + << Context.getTypeDeclType(D->getParent()) << PadSize << (InBits ? 1 : 0); // (byte|bit) + } } if (isPacked && Offset != UnpackedOffset) { HasPackedField = true; @@ -2285,7 +2356,7 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context, if (!MD->isVirtual()) continue; - if (MD->isPure()) + if (MD->isPureVirtual()) continue; // Ignore implicit member functions, they are always marked as inline, but @@ -2387,6 +2458,11 @@ static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) { } static bool isMsLayout(const ASTContext &Context) { + // Check if it's CUDA device compilation; ensure layout consistency with host. + if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice && + Context.getAuxTargetInfo()) + return Context.getAuxTargetInfo()->getCXXABI().isMicrosoft(); + return Context.getTargetInfo().getCXXABI().isMicrosoft(); } @@ -2487,7 +2563,10 @@ struct MicrosoftRecordLayoutBuilder { CharUnits Alignment; }; typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy; - MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {} + MicrosoftRecordLayoutBuilder(const ASTContext &Context, + EmptySubobjectMap *EmptySubobjects) + : Context(Context), EmptySubobjects(EmptySubobjects) {} + private: MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &) = delete; void operator=(const MicrosoftRecordLayoutBuilder &) = delete; @@ -2537,6 +2616,8 @@ public: llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtorDispSet, const CXXRecordDecl *RD) const; const ASTContext &Context; + EmptySubobjectMap *EmptySubobjects; + /// The size of the record being laid out. CharUnits Size; /// The non-virtual size of the record layout. @@ -2610,7 +2691,7 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo( // Track zero-sized subobjects here where it's already available. EndsWithZeroSizedObject = Layout.endsWithZeroSizedObject(); // Respect required alignment, this is necessary because we may have adjusted - // the alignment in the case of pragam pack. Note that the required alignment + // the alignment in the case of pragma pack. Note that the required alignment // doesn't actually apply to the struct alignment at this point. Alignment = std::max(Alignment, Info.Alignment); RequiredAlignment = std::max(RequiredAlignment, Layout.getRequiredAlignment()); @@ -2712,7 +2793,8 @@ void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) { // than the pointer size. 
if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>()){ unsigned PackedAlignment = MFAA->getAlignment(); - if (PackedAlignment <= Context.getTargetInfo().getPointerWidth(0)) + if (PackedAlignment <= + Context.getTargetInfo().getPointerWidth(LangAS::Default)) MaxFieldAlignment = Context.toCharUnitsFromBits(PackedAlignment); } // Packed attribute forces max field alignment to be 1. @@ -2737,10 +2819,10 @@ MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) { SharedVBPtrBase = nullptr; // Calculate pointer size and alignment. These are used for vfptr and vbprt // injection. - PointerInfo.Size = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); - PointerInfo.Alignment = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0)); + PointerInfo.Size = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerWidth(LangAS::Default)); + PointerInfo.Alignment = Context.toCharUnitsFromBits( + Context.getTargetInfo().getPointerAlign(LangAS::Default)); // Respect pragma pack. if (!MaxFieldAlignment.isZero()) PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment); @@ -2849,8 +2931,7 @@ static bool recordUsesEBO(const RecordDecl *RD) { } void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase( - const CXXRecordDecl *RD, - const CXXRecordDecl *BaseDecl, + const CXXRecordDecl *RD, const CXXRecordDecl *BaseDecl, const ASTRecordLayout &BaseLayout, const ASTRecordLayout *&PreviousBaseLayout) { // Insert padding between two bases if the left first one is zero sized or @@ -2867,15 +2948,14 @@ void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase( bool FoundBase = false; if (UseExternalLayout) { FoundBase = External.getExternalNVBaseOffset(BaseDecl, BaseOffset); - if (FoundBase) { - assert(BaseOffset >= Size && "base offset already allocated"); + if (BaseOffset > Size) { Size = BaseOffset; } } if (!FoundBase) { - if (MDCUsesEBO && BaseDecl->isEmpty()) { - assert(BaseLayout.getNonVirtualSize() == CharUnits::Zero()); + if (MDCUsesEBO && BaseDecl->isEmpty() && + (BaseLayout.getNonVirtualSize() == CharUnits::Zero())) { BaseOffset = CharUnits::Zero(); } else { // Otherwise, lay the base out at the end of the MDC. 
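A small illustrative example (not taken from this patch) of the layout effect that MaxFieldAlignment models in the hunks above: #pragma pack caps each member's alignment at the pack value, so on common targets where double is 8 bytes the member below lands at offset 2 and the record alignment drops to 2.

#pragma pack(push, 2)
struct PackedExample {
  char C;
  double D; // capped to 2-byte alignment, placed at offset 2 rather than 8
};
#pragma pack(pop)
static_assert(alignof(PackedExample) == 2,
              "member alignment capped by pragma pack");
static_assert(sizeof(PackedExample) == 10,
              "one byte of padding before D, none after");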
@@ -2884,6 +2964,7 @@ void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase( } Bases.insert(std::make_pair(BaseDecl, BaseOffset)); Size += BaseLayout.getNonVirtualSize(); + DataSize = Size; PreviousBaseLayout = &BaseLayout; } @@ -2901,15 +2982,43 @@ void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) { LastFieldIsNonZeroWidthBitfield = false; ElementInfo Info = getAdjustedElementInfo(FD); Alignment = std::max(Alignment, Info.Alignment); - CharUnits FieldOffset; - if (UseExternalLayout) + + const CXXRecordDecl *FieldClass = FD->getType()->getAsCXXRecordDecl(); + bool IsOverlappingEmptyField = FD->isPotentiallyOverlapping() && + FieldClass->isEmpty() && + FieldClass->fields().empty(); + CharUnits FieldOffset = CharUnits::Zero(); + + if (UseExternalLayout) { FieldOffset = Context.toCharUnitsFromBits(External.getExternalFieldOffset(FD)); - else if (IsUnion) + } else if (IsUnion) { FieldOffset = CharUnits::Zero(); - else + } else if (EmptySubobjects) { + if (!IsOverlappingEmptyField) + FieldOffset = DataSize.alignTo(Info.Alignment); + + while (!EmptySubobjects->CanPlaceFieldAtOffset(FD, FieldOffset)) { + const CXXRecordDecl *ParentClass = cast<CXXRecordDecl>(FD->getParent()); + bool HasBases = ParentClass && (!ParentClass->bases().empty() || + !ParentClass->vbases().empty()); + if (FieldOffset == CharUnits::Zero() && DataSize != CharUnits::Zero() && + HasBases) { + // MSVC appears to only do this when there are base classes; + // otherwise it overlaps no_unique_address fields in non-zero offsets. + FieldOffset = DataSize.alignTo(Info.Alignment); + } else { + FieldOffset += Info.Alignment; + } + } + } else { FieldOffset = Size.alignTo(Info.Alignment); + } placeFieldAtOffset(FieldOffset); + + if (!IsOverlappingEmptyField) + DataSize = std::max(DataSize, FieldOffset + Info.Size); + Size = std::max(Size, FieldOffset + Info.Size); } @@ -2955,6 +3064,7 @@ void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) { Alignment = std::max(Alignment, Info.Alignment); RemainingBitsInField = Context.toBits(Info.Size) - Width; } + DataSize = Size; } void @@ -2980,6 +3090,7 @@ MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) { Size = FieldOffset; Alignment = std::max(Alignment, Info.Alignment); } + DataSize = Size; } void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) { @@ -3025,10 +3136,9 @@ void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) { VBPtrOffset += Offset; if (UseExternalLayout) { - // The class may have no bases or fields, but still have a vfptr - // (e.g. it's an interface class). The size was not correctly set before - // in this case. - if (FieldOffsets.empty() && Bases.empty()) + // The class may have size 0 and a vfptr (e.g. it's an interface class). The + // size was not correctly set before in this case. + if (Size.isZero()) Size += Offset; return; } @@ -3070,7 +3180,7 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) { for (const CXXBaseSpecifier &VBase : RD->vbases()) { const CXXRecordDecl *BaseDecl = VBase.getType()->getAsCXXRecordDecl(); const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl); - bool HasVtordisp = HasVtorDispSet.count(BaseDecl) > 0; + bool HasVtordisp = HasVtorDispSet.contains(BaseDecl); // Insert padding between two bases if the left first one is zero sized or // contains a zero sized subobject and the right is zero sized or one leads // with a zero sized base. 
The padding between virtual bases is 4 @@ -3195,7 +3305,7 @@ void MicrosoftRecordLayoutBuilder::computeVtorDispSet( // Seed the working set with our non-destructor, non-pure virtual methods. for (const CXXMethodDecl *MD : RD->methods()) if (MicrosoftVTableContext::hasVtableSlot(MD) && - !isa<CXXDestructorDecl>(MD) && !MD->isPure()) + !isa<CXXDestructorDecl>(MD) && !MD->isPureVirtual()) Work.insert(MD); while (!Work.empty()) { const CXXMethodDecl *MD = *Work.begin(); @@ -3230,6 +3340,8 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { if (D->hasExternalLexicalStorage() && !D->getDefinition()) getExternalSource()->CompleteType(const_cast<RecordDecl*>(D)); + // Complete the redecl chain (if necessary). + (void)D->getMostRecentDecl(); D = D->getDefinition(); assert(D && "Cannot get layout of forward declarations!"); @@ -3245,8 +3357,9 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { const ASTRecordLayout *NewEntry = nullptr; if (isMsLayout(*this)) { - MicrosoftRecordLayoutBuilder Builder(*this); if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) { + EmptySubobjectMap EmptySubobjects(*this, RD); + MicrosoftRecordLayoutBuilder Builder(*this, &EmptySubobjects); Builder.cxxLayout(RD); NewEntry = new (*this) ASTRecordLayout( *this, Builder.Size, Builder.Alignment, Builder.Alignment, @@ -3258,6 +3371,7 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase, Builder.Bases, Builder.VBases); } else { + MicrosoftRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr); Builder.layout(D); NewEntry = new (*this) ASTRecordLayout( *this, Builder.Size, Builder.Alignment, Builder.Alignment, @@ -3383,6 +3497,7 @@ uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const { uint64_t ASTContext::lookupFieldBitOffset(const ObjCInterfaceDecl *OID, const ObjCImplementationDecl *ID, const ObjCIvarDecl *Ivar) const { + Ivar = Ivar->getCanonicalDecl(); const ObjCInterfaceDecl *Container = Ivar->getContainingInterface(); // FIXME: We should eliminate the need to have ObjCImplementationDecl passed @@ -3501,7 +3616,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, auto CXXRD = dyn_cast<CXXRecordDecl>(RD); PrintOffset(OS, Offset, IndentLevel); - OS << C.getTypeDeclType(const_cast<RecordDecl*>(RD)).getAsString(); + OS << C.getTypeDeclType(const_cast<RecordDecl *>(RD)); if (Description) OS << ' ' << Description; if (CXXRD && CXXRD->isEmpty()) @@ -3586,7 +3701,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD, const QualType &FieldType = C.getLangOpts().DumpRecordLayoutsCanonical ? Field.getType().getCanonicalType() : Field.getType(); - OS << FieldType.getAsString() << ' ' << Field << '\n'; + OS << FieldType << ' ' << Field << '\n'; } // Dump virtual bases. @@ -3652,7 +3767,7 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS, // in libFrontend. 
const ASTRecordLayout &Info = getASTRecordLayout(RD); - OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n"; + OS << "Type: " << getTypeDeclType(RD) << "\n"; OS << "\nLayout: "; OS << "<ASTRecordLayout\n"; OS << " Size:" << toBits(Info.getSize()) << "\n"; @@ -3662,6 +3777,28 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS, if (Target->defaultsToAIXPowerAlignment()) OS << " PreferredAlignment:" << toBits(Info.getPreferredAlignment()) << "\n"; + if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { + OS << " BaseOffsets: ["; + const CXXRecordDecl *Base = nullptr; + for (auto I : CXXRD->bases()) { + if (I.isVirtual()) + continue; + if (Base) + OS << ", "; + Base = I.getType()->getAsCXXRecordDecl(); + OS << Info.CXXInfo->BaseOffsets[Base].getQuantity(); + } + OS << "]>\n"; + OS << " VBaseOffsets: ["; + const CXXRecordDecl *VBase = nullptr; + for (auto I : CXXRD->vbases()) { + if (VBase) + OS << ", "; + VBase = I.getType()->getAsCXXRecordDecl(); + OS << Info.CXXInfo->VBaseOffsets[VBase].VBaseOffset.getQuantity(); + } + OS << "]>\n"; + } OS << " FieldOffsets: ["; for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) { if (i) diff --git a/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp index 8d763f28e57f..7ee21c8c6195 100644 --- a/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp +++ b/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp @@ -161,6 +161,7 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H, default: break; case '%': k = ConversionSpecifier::PercentArg; break; + case 'b': k = ConversionSpecifier::bArg; break; case 'A': k = ConversionSpecifier::AArg; break; case 'E': k = ConversionSpecifier::EArg; break; case 'F': k = ConversionSpecifier::FArg; break; @@ -267,6 +268,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const { llvm_unreachable("Unsupported LengthModifier Type"); // Unsigned int. + case ConversionSpecifier::bArg: case ConversionSpecifier::oArg: case ConversionSpecifier::OArg: case ConversionSpecifier::uArg: @@ -343,7 +345,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const { case LengthModifier::AsShort: if (Ctx.getTargetInfo().getTriple().isOSMSVCRT()) return ArgType::PtrTo(ArgType::AnyCharTy); - LLVM_FALLTHROUGH; + [[fallthrough]]; default: return ArgType::Invalid(); } @@ -360,7 +362,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const { case LengthModifier::AsShort: if (Ctx.getTargetInfo().getTriple().isOSMSVCRT()) return ArgType::PtrTo(ArgType::AnyCharTy); - LLVM_FALLTHROUGH; + [[fallthrough]]; default: return ArgType::Invalid(); } @@ -444,11 +446,9 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT, // If we know the target array length, we can use it as a field width. if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(RawQT)) { - if (CAT->getSizeModifier() == ArrayType::Normal) + if (CAT->getSizeModifier() == ArraySizeModifier::Normal) FieldWidth = OptionalAmount(OptionalAmount::Constant, - CAT->getSize().getZExtValue() - 1, - "", 0, false); - + CAT->getZExtSize() - 1, "", 0, false); } return true; } @@ -500,7 +500,7 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT, } // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99. - if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus11)) + if (LangOpt.C99 || LangOpt.CPlusPlus11) namedTypeToLengthModifier(PT, LM); // If fixing the length modifier was enough, we are done. 
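As a hedged sketch (not drawn from the patch), the scanf usage the format-string changes above are checking: a field width of array size minus one for a char buffer of known bound, and the newly handled %b specifier, grouped with the unsigned conversions, reading an unsigned int written in binary (a C23 addition).

#include <cstdio>

// Sketch: "%9s" leaves room for the terminating NUL in Buf[10];
// "%b" stores a binary-formatted value into an unsigned int.
void readExample() {
  char Buf[10];
  unsigned Mask = 0;
  if (std::scanf("%9s %b", Buf, &Mask) == 2)
    std::printf("%s %u\n", Buf, Mask);
}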
diff --git a/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp b/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp index 2c34c9c60c2b..ebe6324f904c 100644 --- a/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp +++ b/contrib/llvm-project/clang/lib/AST/SelectorLocationsKind.cpp @@ -26,7 +26,7 @@ static SourceLocation getStandardSelLoc(unsigned Index, assert(Index == 0); if (EndLoc.isInvalid()) return SourceLocation(); - IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0); + const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0); unsigned Len = II ? II->getLength() : 0; return EndLoc.getLocWithOffset(-Len); } @@ -34,7 +34,7 @@ static SourceLocation getStandardSelLoc(unsigned Index, assert(Index < NumSelArgs); if (ArgLoc.isInvalid()) return SourceLocation(); - IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index); + const IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index); unsigned Len = /* selector id */ (II ? II->getLength() : 0) + /* ':' */ 1; if (WithArgSpace) ++Len; diff --git a/contrib/llvm-project/clang/lib/AST/Stmt.cpp b/contrib/llvm-project/clang/lib/AST/Stmt.cpp index 47693ef9fee3..fe59d6070b3e 100644 --- a/contrib/llvm-project/clang/lib/AST/Stmt.cpp +++ b/contrib/llvm-project/clang/lib/AST/Stmt.cpp @@ -23,6 +23,7 @@ #include "clang/AST/ExprOpenMP.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" +#include "clang/AST/StmtOpenACC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/Type.h" #include "clang/Basic/CharInfo.h" @@ -41,6 +42,7 @@ #include <algorithm> #include <cassert> #include <cstring> +#include <optional> #include <string> #include <type_traits> #include <utility> @@ -361,12 +363,14 @@ int64_t Stmt::getID(const ASTContext &Context) const { return Context.getAllocator().identifyKnownAlignedObject<Stmt>(this); } -CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, - SourceLocation RB) - : Stmt(CompoundStmtClass), RBraceLoc(RB) { +CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, FPOptionsOverride FPFeatures, + SourceLocation LB, SourceLocation RB) + : Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) { CompoundStmtBits.NumStmts = Stmts.size(); + CompoundStmtBits.HasFPFeatures = FPFeatures.requiresTrailingStorage(); setStmts(Stmts); - CompoundStmtBits.LBraceLoc = LB; + if (hasStoredFPFeatures()) + setStoredFPFeatures(FPFeatures); } void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) { @@ -377,18 +381,23 @@ void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) { } CompoundStmt *CompoundStmt::Create(const ASTContext &C, ArrayRef<Stmt *> Stmts, + FPOptionsOverride FPFeatures, SourceLocation LB, SourceLocation RB) { void *Mem = - C.Allocate(totalSizeToAlloc<Stmt *>(Stmts.size()), alignof(CompoundStmt)); - return new (Mem) CompoundStmt(Stmts, LB, RB); + C.Allocate(totalSizeToAlloc<Stmt *, FPOptionsOverride>( + Stmts.size(), FPFeatures.requiresTrailingStorage()), + alignof(CompoundStmt)); + return new (Mem) CompoundStmt(Stmts, FPFeatures, LB, RB); } -CompoundStmt *CompoundStmt::CreateEmpty(const ASTContext &C, - unsigned NumStmts) { - void *Mem = - C.Allocate(totalSizeToAlloc<Stmt *>(NumStmts), alignof(CompoundStmt)); +CompoundStmt *CompoundStmt::CreateEmpty(const ASTContext &C, unsigned NumStmts, + bool HasFPFeatures) { + void *Mem = C.Allocate( + totalSizeToAlloc<Stmt *, FPOptionsOverride>(NumStmts, HasFPFeatures), + alignof(CompoundStmt)); CompoundStmt *New = new (Mem) CompoundStmt(EmptyShell()); New->CompoundStmtBits.NumStmts = NumStmts; + New->CompoundStmtBits.HasFPFeatures = 
HasFPFeatures; return New; } @@ -568,21 +577,20 @@ void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C, /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const { - unsigned NumPlusOperands = 0; - // Check if this is an output operand. - for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) { + unsigned NumOutputs = getNumOutputs(); + for (unsigned i = 0; i != NumOutputs; ++i) if (getOutputName(i) == SymbolicName) return i; - } - for (unsigned i = 0, e = getNumInputs(); i != e; ++i) + unsigned NumInputs = getNumInputs(); + for (unsigned i = 0; i != NumInputs; ++i) if (getInputName(i) == SymbolicName) - return getNumOutputs() + NumPlusOperands + i; + return NumOutputs + i; for (unsigned i = 0, e = getNumLabels(); i != e; ++i) if (getLabelName(i) == SymbolicName) - return i + getNumOutputs() + getNumInputs(); + return NumOutputs + NumInputs + getNumPlusOperands() + i; // Not found. return -1; @@ -804,11 +812,12 @@ std::string MSAsmStmt::generateAsmString(const ASTContext &C) const { StringRef Instruction = Pieces[I]; // For vex/vex2/vex3/evex masm style prefix, convert it to att style // since we don't support masm style prefix in backend. - if (Instruction.startswith("vex ")) + if (Instruction.starts_with("vex ")) MSAsmString += '{' + Instruction.substr(0, 3).str() + '}' + Instruction.substr(3).str(); - else if (Instruction.startswith("vex2 ") || - Instruction.startswith("vex3 ") || Instruction.startswith("evex ")) + else if (Instruction.starts_with("vex2 ") || + Instruction.starts_with("vex3 ") || + Instruction.starts_with("evex ")) MSAsmString += '{' + Instruction.substr(0, 4).str() + '}' + Instruction.substr(4).str(); else @@ -912,7 +921,7 @@ void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr, }); } -IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, +IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, IfStatementKind Kind, Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LPL, SourceLocation RPL, Stmt *Then, SourceLocation EL, Stmt *Else) : Stmt(IfStmtClass), LParenLoc(LPL), RParenLoc(RPL) { @@ -923,7 +932,7 @@ IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, IfStmtBits.HasVar = HasVar; IfStmtBits.HasInit = HasInit; - setConstexpr(IsConstexpr); + setStatementKind(Kind); setCond(Cond); setThen(Then); @@ -947,9 +956,9 @@ IfStmt::IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit) } IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL, - bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, - SourceLocation LPL, SourceLocation RPL, Stmt *Then, - SourceLocation EL, Stmt *Else) { + IfStatementKind Kind, Stmt *Init, VarDecl *Var, + Expr *Cond, SourceLocation LPL, SourceLocation RPL, + Stmt *Then, SourceLocation EL, Stmt *Else) { bool HasElse = Else != nullptr; bool HasVar = Var != nullptr; bool HasInit = Init != nullptr; @@ -958,7 +967,7 @@ IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL, NumMandatoryStmtPtr + HasElse + HasVar + HasInit, HasElse), alignof(IfStmt)); return new (Mem) - IfStmt(Ctx, IL, IsConstexpr, Init, Var, Cond, LPL, RPL, Then, EL, Else); + IfStmt(Ctx, IL, Kind, Init, Var, Cond, LPL, RPL, Then, EL, Else); } IfStmt *IfStmt::CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar, @@ -995,18 +1004,18 @@ bool IfStmt::isObjCAvailabilityCheck() const { return isa<ObjCAvailabilityCheckExpr>(getCond()); } 
-Optional<Stmt *> IfStmt::getNondiscardedCase(const ASTContext &Ctx) { +std::optional<Stmt *> IfStmt::getNondiscardedCase(const ASTContext &Ctx) { if (!isConstexpr() || getCond()->isValueDependent()) - return None; + return std::nullopt; return !getCond()->EvaluateKnownConstInt(Ctx) ? getElse() : getThen(); } -Optional<const Stmt *> +std::optional<const Stmt *> IfStmt::getNondiscardedCase(const ASTContext &Ctx) const { - if (Optional<Stmt *> Result = + if (std::optional<Stmt *> Result = const_cast<IfStmt *>(this)->getNondiscardedCase(Ctx)) return *Result; - return None; + return std::nullopt; } ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, @@ -1338,6 +1347,11 @@ CapturedStmt::CapturedStmt(EmptyShell Empty, unsigned NumCaptures) : Stmt(CapturedStmtClass, Empty), NumCaptures(NumCaptures), CapDeclAndKind(nullptr, CR_Default) { getStoredStmts()[NumCaptures] = nullptr; + + // Construct default capture objects. + Capture *Buffer = getStoredCaptures(); + for (unsigned I = 0, N = NumCaptures; I != N; ++I) + new (Buffer++) Capture(); } CapturedStmt *CapturedStmt::Create(const ASTContext &Context, Stmt *S, diff --git a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp index 060d090fc06a..0d6fc848f739 100644 --- a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp +++ b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp @@ -23,7 +23,8 @@ QualType CXXCatchStmt::getCaughtType() const { } CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc, - Stmt *tryBlock, ArrayRef<Stmt *> handlers) { + CompoundStmt *tryBlock, + ArrayRef<Stmt *> handlers) { const size_t Size = totalSizeToAlloc<Stmt *>(handlers.size() + 1); void *Mem = C.Allocate(Size, alignof(CXXTryStmt)); return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers); @@ -36,7 +37,7 @@ CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty, return new (Mem) CXXTryStmt(Empty, numHandlers); } -CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock, +CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, CompoundStmt *tryBlock, ArrayRef<Stmt *> handlers) : Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) { Stmt **Stmts = getStmts(); @@ -117,8 +118,8 @@ CoroutineBodyStmt::CoroutineBodyStmt(CoroutineBodyStmt::CtorArgs const &Args) SubStmts[CoroutineBodyStmt::OnFallthrough] = Args.OnFallthrough; SubStmts[CoroutineBodyStmt::Allocate] = Args.Allocate; SubStmts[CoroutineBodyStmt::Deallocate] = Args.Deallocate; - SubStmts[CoroutineBodyStmt::ReturnValue] = Args.ReturnValue; SubStmts[CoroutineBodyStmt::ResultDecl] = Args.ResultDecl; + SubStmts[CoroutineBodyStmt::ReturnValue] = Args.ReturnValue; SubStmts[CoroutineBodyStmt::ReturnStmt] = Args.ReturnStmt; SubStmts[CoroutineBodyStmt::ReturnStmtOnAllocFailure] = Args.ReturnStmtOnAllocFailure; diff --git a/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp b/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp index 3d586795517c..12d8a9e7dac8 100644 --- a/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp +++ b/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp @@ -46,9 +46,8 @@ ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context, SourceLocation atTryLoc, Stmt *atTryStmt, Stmt **CatchStmts, unsigned NumCatchStmts, Stmt *atFinallyStmt) { - unsigned Size = - sizeof(ObjCAtTryStmt) + - (1 + NumCatchStmts + (atFinallyStmt != nullptr)) * sizeof(Stmt *); + size_t Size = + totalSizeToAlloc<Stmt *>(1 + NumCatchStmts + (atFinallyStmt != nullptr)); void *Mem = Context.Allocate(Size, alignof(ObjCAtTryStmt)); return 
new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts, atFinallyStmt); @@ -57,8 +56,7 @@ ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context, ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context, unsigned NumCatchStmts, bool HasFinally) { - unsigned Size = - sizeof(ObjCAtTryStmt) + (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *); + size_t Size = totalSizeToAlloc<Stmt *>(1 + NumCatchStmts + HasFinally); void *Mem = Context.Allocate(Size, alignof(ObjCAtTryStmt)); return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally); } diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenACC.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenACC.cpp new file mode 100644 index 000000000000..2d864a288579 --- /dev/null +++ b/contrib/llvm-project/clang/lib/AST/StmtOpenACC.cpp @@ -0,0 +1,125 @@ +//===--- StmtOpenACC.cpp - Classes for OpenACC Constructs -----------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the subclasses of Stmt class declared in StmtOpenACC.h +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/StmtOpenACC.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "clang/AST/StmtCXX.h" +using namespace clang; + +OpenACCComputeConstruct * +OpenACCComputeConstruct::CreateEmpty(const ASTContext &C, unsigned NumClauses) { + void *Mem = C.Allocate( + OpenACCComputeConstruct::totalSizeToAlloc<const OpenACCClause *>( + NumClauses)); + auto *Inst = new (Mem) OpenACCComputeConstruct(NumClauses); + return Inst; +} + +OpenACCComputeConstruct *OpenACCComputeConstruct::Create( + const ASTContext &C, OpenACCDirectiveKind K, SourceLocation BeginLoc, + SourceLocation DirLoc, SourceLocation EndLoc, + ArrayRef<const OpenACCClause *> Clauses, Stmt *StructuredBlock, + ArrayRef<OpenACCLoopConstruct *> AssociatedLoopConstructs) { + void *Mem = C.Allocate( + OpenACCComputeConstruct::totalSizeToAlloc<const OpenACCClause *>( + Clauses.size())); + auto *Inst = new (Mem) OpenACCComputeConstruct(K, BeginLoc, DirLoc, EndLoc, + Clauses, StructuredBlock); + + llvm::for_each(AssociatedLoopConstructs, [&](OpenACCLoopConstruct *C) { + C->setParentComputeConstruct(Inst); + }); + + return Inst; +} + +void OpenACCComputeConstruct::findAndSetChildLoops() { + struct LoopConstructFinder : RecursiveASTVisitor<LoopConstructFinder> { + OpenACCComputeConstruct *Construct = nullptr; + + LoopConstructFinder(OpenACCComputeConstruct *Construct) + : Construct(Construct) {} + + bool TraverseOpenACCComputeConstruct(OpenACCComputeConstruct *C) { + // Stop searching if we find a compute construct. + return true; + } + bool TraverseOpenACCLoopConstruct(OpenACCLoopConstruct *C) { + // Stop searching if we find a loop construct, after taking ownership of + // it. 
+ C->setParentComputeConstruct(Construct); + return true; + } + }; + + LoopConstructFinder f(this); + f.TraverseStmt(getAssociatedStmt()); +} + +OpenACCLoopConstruct::OpenACCLoopConstruct(unsigned NumClauses) + : OpenACCAssociatedStmtConstruct( + OpenACCLoopConstructClass, OpenACCDirectiveKind::Loop, + SourceLocation{}, SourceLocation{}, SourceLocation{}, + /*AssociatedStmt=*/nullptr) { + std::uninitialized_value_construct( + getTrailingObjects<const OpenACCClause *>(), + getTrailingObjects<const OpenACCClause *>() + NumClauses); + setClauseList( + MutableArrayRef(getTrailingObjects<const OpenACCClause *>(), NumClauses)); +} + +OpenACCLoopConstruct::OpenACCLoopConstruct( + SourceLocation Start, SourceLocation DirLoc, SourceLocation End, + ArrayRef<const OpenACCClause *> Clauses, Stmt *Loop) + : OpenACCAssociatedStmtConstruct(OpenACCLoopConstructClass, + OpenACCDirectiveKind::Loop, Start, DirLoc, + End, Loop) { + // accept 'nullptr' for the loop. This is diagnosed somewhere, but this gives + // us some level of AST fidelity in the error case. + assert((Loop == nullptr || isa<ForStmt, CXXForRangeStmt>(Loop)) && + "Associated Loop not a for loop?"); + // Initialize the trailing storage. + std::uninitialized_copy(Clauses.begin(), Clauses.end(), + getTrailingObjects<const OpenACCClause *>()); + + setClauseList(MutableArrayRef(getTrailingObjects<const OpenACCClause *>(), + Clauses.size())); +} + +void OpenACCLoopConstruct::setLoop(Stmt *Loop) { + assert((isa<ForStmt, CXXForRangeStmt>(Loop)) && + "Associated Loop not a for loop?"); + setAssociatedStmt(Loop); +} + +OpenACCLoopConstruct *OpenACCLoopConstruct::CreateEmpty(const ASTContext &C, + unsigned NumClauses) { + void *Mem = + C.Allocate(OpenACCLoopConstruct::totalSizeToAlloc<const OpenACCClause *>( + NumClauses)); + auto *Inst = new (Mem) OpenACCLoopConstruct(NumClauses); + return Inst; +} + +OpenACCLoopConstruct * +OpenACCLoopConstruct::Create(const ASTContext &C, SourceLocation BeginLoc, + SourceLocation DirLoc, SourceLocation EndLoc, + ArrayRef<const OpenACCClause *> Clauses, + Stmt *Loop) { + void *Mem = + C.Allocate(OpenACCLoopConstruct::totalSizeToAlloc<const OpenACCClause *>( + Clauses.size())); + auto *Inst = + new (Mem) OpenACCLoopConstruct(BeginLoc, DirLoc, EndLoc, Clauses, Loop); + return Inst; +} diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp index b0ef2f49ba04..a2325b177d41 100644 --- a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp +++ b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp @@ -6,7 +6,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements the subclesses of Stmt class declared in StmtOpenMP.h +// This file implements the subclasses of Stmt class declared in StmtOpenMP.h // //===----------------------------------------------------------------------===// @@ -31,7 +31,7 @@ void OMPChildren::setClauses(ArrayRef<OMPClause *> Clauses) { } MutableArrayRef<Stmt *> OMPChildren::getChildren() { - return llvm::makeMutableArrayRef(getTrailingObjects<Stmt *>(), NumChildren); + return llvm::MutableArrayRef(getTrailingObjects<Stmt *>(), NumChildren); } OMPChildren *OMPChildren::Create(void *Mem, ArrayRef<OMPClause *> Clauses) { @@ -125,28 +125,34 @@ OMPLoopBasedDirective::tryToFindNextInnerLoop(Stmt *CurStmt, bool OMPLoopBasedDirective::doForAllLoops( Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops, llvm::function_ref<bool(unsigned, Stmt *)> Callback, - 
llvm::function_ref<void(OMPLoopBasedDirective *)> + llvm::function_ref<void(OMPLoopTransformationDirective *)> OnTransformationCallback) { CurStmt = CurStmt->IgnoreContainers(); for (unsigned Cnt = 0; Cnt < NumLoops; ++Cnt) { while (true) { - auto *OrigStmt = CurStmt; - if (auto *Dir = dyn_cast<OMPTileDirective>(OrigStmt)) { - OnTransformationCallback(Dir); - CurStmt = Dir->getTransformedStmt(); - } else if (auto *Dir = dyn_cast<OMPUnrollDirective>(OrigStmt)) { - OnTransformationCallback(Dir); - CurStmt = Dir->getTransformedStmt(); - } else { + auto *Dir = dyn_cast<OMPLoopTransformationDirective>(CurStmt); + if (!Dir) break; - } - if (!CurStmt) { - // May happen if the loop transformation does not result in a generated - // loop (such as full unrolling). - CurStmt = OrigStmt; - break; + OnTransformationCallback(Dir); + + Stmt *TransformedStmt = Dir->getTransformedStmt(); + if (!TransformedStmt) { + unsigned NumGeneratedLoops = Dir->getNumGeneratedLoops(); + if (NumGeneratedLoops == 0) { + // May happen if the loop transformation does not result in a + // generated loop (such as full unrolling). + break; + } + if (NumGeneratedLoops > 0) { + // The loop transformation construct has generated loops, but these + // may not have been generated yet due to being in a dependent + // context. + return true; + } } + + CurStmt = TransformedStmt; } if (auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(CurStmt)) CurStmt = CanonLoop->getLoopStmt(); @@ -253,6 +259,25 @@ void OMPLoopDirective::setFinalsConditions(ArrayRef<Expr *> A) { llvm::copy(A, getFinalsConditions().begin()); } +OMPMetaDirective *OMPMetaDirective::Create(const ASTContext &C, + SourceLocation StartLoc, + SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses, + Stmt *AssociatedStmt, Stmt *IfStmt) { + auto *Dir = createDirective<OMPMetaDirective>( + C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc); + Dir->setIfStmt(IfStmt); + return Dir; +} + +OMPMetaDirective *OMPMetaDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + EmptyShell) { + return createEmptyDirective<OMPMetaDirective>(C, NumClauses, + /*HasAssociatedStmt=*/true, + /*NumChildren=*/1); +} + OMPParallelDirective *OMPParallelDirective::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, @@ -272,11 +297,10 @@ OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C, /*NumChildren=*/1); } -OMPSimdDirective * -OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc, - SourceLocation EndLoc, unsigned CollapsedNum, - ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, - const HelperExprs &Exprs) { +OMPSimdDirective *OMPSimdDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs, OpenMPDirectiveKind ParamPrevMappedDirective) { auto *Dir = createDirective<OMPSimdDirective>( C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_simd), StartLoc, EndLoc, CollapsedNum); @@ -296,6 +320,7 @@ OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc, Dir->setDependentInits(Exprs.DependentInits); Dir->setFinalsConditions(Exprs.FinalsConditions); Dir->setPreInits(Exprs.PreInits); + Dir->setMappedDirective(ParamPrevMappedDirective); return Dir; } @@ -311,7 +336,8 @@ OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C, OMPForDirective *OMPForDirective::Create( const 
ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, - const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) { + const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel, + OpenMPDirectiveKind ParamPrevMappedDirective) { auto *Dir = createDirective<OMPForDirective>( C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_for) + 1, StartLoc, EndLoc, CollapsedNum); @@ -341,9 +367,36 @@ OMPForDirective *OMPForDirective::Create( Dir->setPreInits(Exprs.PreInits); Dir->setTaskReductionRefExpr(TaskRedRef); Dir->setHasCancel(HasCancel); + Dir->setMappedDirective(ParamPrevMappedDirective); return Dir; } +Stmt *OMPLoopTransformationDirective::getTransformedStmt() const { + switch (getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define OMPLOOPTRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + return static_cast<const CLASS *>(this)->getTransformedStmt(); +#include "clang/AST/StmtNodes.inc" + default: + llvm_unreachable("Not a loop transformation"); + } +} + +Stmt *OMPLoopTransformationDirective::getPreInits() const { + switch (getStmtClass()) { +#define STMT(CLASS, PARENT) +#define ABSTRACT_STMT(CLASS) +#define OMPLOOPTRANSFORMATIONDIRECTIVE(CLASS, PARENT) \ + case Stmt::CLASS##Class: \ + return static_cast<const CLASS *>(this)->getPreInits(); +#include "clang/AST/StmtNodes.inc" + default: + llvm_unreachable("Not a loop transformation"); + } +} + OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, @@ -377,10 +430,13 @@ OMPTileDirective *OMPTileDirective::CreateEmpty(const ASTContext &C, OMPUnrollDirective * OMPUnrollDirective::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, - Stmt *AssociatedStmt, Stmt *TransformedStmt, - Stmt *PreInits) { + Stmt *AssociatedStmt, unsigned NumGeneratedLoops, + Stmt *TransformedStmt, Stmt *PreInits) { + assert(NumGeneratedLoops <= 1 && "Unrolling generates at most one loop"); + auto *Dir = createDirective<OMPUnrollDirective>( C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc); + Dir->setNumGeneratedLoops(NumGeneratedLoops); Dir->setTransformedStmt(TransformedStmt); Dir->setPreInits(PreInits); return Dir; @@ -393,6 +449,44 @@ OMPUnrollDirective *OMPUnrollDirective::CreateEmpty(const ASTContext &C, SourceLocation(), SourceLocation()); } +OMPReverseDirective * +OMPReverseDirective::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation EndLoc, Stmt *AssociatedStmt, + Stmt *TransformedStmt, Stmt *PreInits) { + OMPReverseDirective *Dir = createDirective<OMPReverseDirective>( + C, std::nullopt, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, + EndLoc); + Dir->setTransformedStmt(TransformedStmt); + Dir->setPreInits(PreInits); + return Dir; +} + +OMPReverseDirective *OMPReverseDirective::CreateEmpty(const ASTContext &C) { + return createEmptyDirective<OMPReverseDirective>( + C, /*NumClauses=*/0, /*HasAssociatedStmt=*/true, + TransformedStmtOffset + 1, SourceLocation(), SourceLocation()); +} + +OMPInterchangeDirective *OMPInterchangeDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses, unsigned NumLoops, Stmt *AssociatedStmt, + Stmt *TransformedStmt, Stmt *PreInits) { + OMPInterchangeDirective *Dir = createDirective<OMPInterchangeDirective>( + C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, 
StartLoc, EndLoc, + NumLoops); + Dir->setTransformedStmt(TransformedStmt); + Dir->setPreInits(PreInits); + return Dir; +} + +OMPInterchangeDirective * +OMPInterchangeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses, + unsigned NumLoops) { + return createEmptyDirective<OMPInterchangeDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, TransformedStmtOffset + 1, + SourceLocation(), SourceLocation(), NumLoops); +} + OMPForSimdDirective * OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, @@ -463,8 +557,8 @@ OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C, Stmt *AssociatedStmt, bool HasCancel) { auto *Dir = - createDirective<OMPSectionDirective>(C, llvm::None, AssociatedStmt, - /*NumChildre=*/0, StartLoc, EndLoc); + createDirective<OMPSectionDirective>(C, std::nullopt, AssociatedStmt, + /*NumChildren=*/0, StartLoc, EndLoc); Dir->setHasCancel(HasCancel); return Dir; } @@ -475,6 +569,23 @@ OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C, /*HasAssociatedStmt=*/true); } +OMPScopeDirective *OMPScopeDirective::Create(const ASTContext &C, + SourceLocation StartLoc, + SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses, + Stmt *AssociatedStmt) { + return createDirective<OMPScopeDirective>(C, Clauses, AssociatedStmt, + /*NumChildren=*/0, StartLoc, + EndLoc); +} + +OMPScopeDirective *OMPScopeDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + EmptyShell) { + return createEmptyDirective<OMPScopeDirective>(C, NumClauses, + /*HasAssociatedStmt=*/true); +} + OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, @@ -496,7 +607,7 @@ OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt) { - return createDirective<OMPMasterDirective>(C, llvm::None, AssociatedStmt, + return createDirective<OMPMasterDirective>(C, std::nullopt, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc); } @@ -628,6 +739,22 @@ OMPParallelMasterDirective::CreateEmpty(const ASTContext &C, C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1); } +OMPParallelMaskedDirective *OMPParallelMaskedDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef) { + auto *Dir = createDirective<OMPParallelMaskedDirective>( + C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc); + Dir->setTaskReductionRefExpr(TaskRedRef); + return Dir; +} + +OMPParallelMaskedDirective * +OMPParallelMaskedDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, EmptyShell) { + return createEmptyDirective<OMPParallelMaskedDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1); +} + OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, @@ -674,6 +801,21 @@ OMPTaskyieldDirective *OMPTaskyieldDirective::CreateEmpty(const ASTContext &C, return new (C) OMPTaskyieldDirective(); } +OMPErrorDirective *OMPErrorDirective::Create(const ASTContext &C, + SourceLocation StartLoc, + SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses) { + return createDirective<OMPErrorDirective>( + C, Clauses, /*AssociatedStmt=*/nullptr, /*NumChildren=*/0, StartLoc, + EndLoc); +} + +OMPErrorDirective 
*OMPErrorDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + EmptyShell) { + return createEmptyDirective<OMPErrorDirective>(C, NumClauses); +} + OMPBarrierDirective *OMPBarrierDirective::Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc) { @@ -685,15 +827,19 @@ OMPBarrierDirective *OMPBarrierDirective::CreateEmpty(const ASTContext &C, return new (C) OMPBarrierDirective(); } -OMPTaskwaitDirective *OMPTaskwaitDirective::Create(const ASTContext &C, - SourceLocation StartLoc, - SourceLocation EndLoc) { - return new (C) OMPTaskwaitDirective(StartLoc, EndLoc); +OMPTaskwaitDirective * +OMPTaskwaitDirective::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses) { + return createDirective<OMPTaskwaitDirective>( + C, Clauses, /*AssociatedStmt=*/nullptr, /*NumChildren=*/0, StartLoc, + EndLoc); } OMPTaskwaitDirective *OMPTaskwaitDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, EmptyShell) { - return new (C) OMPTaskwaitDirective(); + return createEmptyDirective<OMPTaskwaitDirective>(C, NumClauses); } OMPTaskgroupDirective *OMPTaskgroupDirective::Create( @@ -805,18 +951,22 @@ OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C, !IsStandalone); } -OMPAtomicDirective *OMPAtomicDirective::Create( - const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, - ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, - Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) { +OMPAtomicDirective * +OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, + Stmt *AssociatedStmt, Expressions Exprs) { auto *Dir = createDirective<OMPAtomicDirective>( - C, Clauses, AssociatedStmt, /*NumChildren=*/4, StartLoc, EndLoc); - Dir->setX(X); - Dir->setV(V); - Dir->setExpr(E); - Dir->setUpdateExpr(UE); - Dir->IsXLHSInRHSPart = IsXLHSInRHSPart; - Dir->IsPostfixUpdate = IsPostfixUpdate; + C, Clauses, AssociatedStmt, /*NumChildren=*/7, StartLoc, EndLoc); + Dir->setX(Exprs.X); + Dir->setV(Exprs.V); + Dir->setR(Exprs.R); + Dir->setExpr(Exprs.E); + Dir->setUpdateExpr(Exprs.UE); + Dir->setD(Exprs.D); + Dir->setCond(Exprs.Cond); + Dir->Flags.IsXLHSInRHSPart = Exprs.IsXLHSInRHSPart ? 1 : 0; + Dir->Flags.IsPostfixUpdate = Exprs.IsPostfixUpdate ? 1 : 0; + Dir->Flags.IsFailOnly = Exprs.IsFailOnly ? 
1 : 0; return Dir; } @@ -824,7 +974,7 @@ OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell) { return createEmptyDirective<OMPAtomicDirective>( - C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/4); + C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/7); } OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C, @@ -1098,6 +1248,51 @@ OMPMasterTaskLoopDirective::CreateEmpty(const ASTContext &C, numLoopChildren(CollapsedNum, OMPD_master_taskloop), CollapsedNum); } +OMPMaskedTaskLoopDirective *OMPMaskedTaskLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs, bool HasCancel) { + auto *Dir = createDirective<OMPMaskedTaskLoopDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_masked_taskloop), StartLoc, EndLoc, + CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + Dir->setHasCancel(HasCancel); + return Dir; +} + +OMPMaskedTaskLoopDirective * +OMPMaskedTaskLoopDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, EmptyShell) { + return createEmptyDirective<OMPMaskedTaskLoopDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_masked_taskloop), CollapsedNum); +} + OMPMasterTaskLoopSimdDirective *OMPMasterTaskLoopSimdDirective::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, @@ -1142,6 +1337,50 @@ OMPMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, numLoopChildren(CollapsedNum, OMPD_master_taskloop_simd), CollapsedNum); } +OMPMaskedTaskLoopSimdDirective *OMPMaskedTaskLoopSimdDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs) { + auto *Dir = createDirective<OMPMaskedTaskLoopSimdDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_masked_taskloop_simd), StartLoc, + EndLoc, CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + 
Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + return Dir; +} + +OMPMaskedTaskLoopSimdDirective * +OMPMaskedTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, EmptyShell) { + return createEmptyDirective<OMPMaskedTaskLoopSimdDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_masked_taskloop_simd), CollapsedNum); +} + OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, @@ -1189,6 +1428,53 @@ OMPParallelMasterTaskLoopDirective::CreateEmpty(const ASTContext &C, CollapsedNum); } +OMPParallelMaskedTaskLoopDirective *OMPParallelMaskedTaskLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs, bool HasCancel) { + auto *Dir = createDirective<OMPParallelMaskedTaskLoopDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop), StartLoc, + EndLoc, CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + Dir->setHasCancel(HasCancel); + return Dir; +} + +OMPParallelMaskedTaskLoopDirective * +OMPParallelMaskedTaskLoopDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, + EmptyShell) { + return createEmptyDirective<OMPParallelMaskedTaskLoopDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop), + CollapsedNum); +} + OMPParallelMasterTaskLoopSimdDirective * OMPParallelMasterTaskLoopSimdDirective::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, @@ -1236,10 +1522,57 @@ OMPParallelMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, CollapsedNum); } -OMPDistributeDirective *OMPDistributeDirective::Create( +OMPParallelMaskedTaskLoopSimdDirective * +OMPParallelMaskedTaskLoopSimdDirective::Create( const ASTContext 
&C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs) { + auto *Dir = createDirective<OMPParallelMaskedTaskLoopSimdDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop_simd), + StartLoc, EndLoc, CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + return Dir; +} + +OMPParallelMaskedTaskLoopSimdDirective * +OMPParallelMaskedTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, + EmptyShell) { + return createEmptyDirective<OMPParallelMaskedTaskLoopSimdDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop_simd), + CollapsedNum); +} + +OMPDistributeDirective *OMPDistributeDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs, OpenMPDirectiveKind ParamPrevMappedDirective) { auto *Dir = createDirective<OMPDistributeDirective>( C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_distribute), StartLoc, EndLoc, @@ -1268,6 +1601,7 @@ OMPDistributeDirective *OMPDistributeDirective::Create( Dir->setDependentInits(Exprs.DependentInits); Dir->setFinalsConditions(Exprs.FinalsConditions); Dir->setPreInits(Exprs.PreInits); + Dir->setMappedDirective(ParamPrevMappedDirective); return Dir; } @@ -2032,3 +2366,250 @@ OMPMaskedDirective *OMPMaskedDirective::CreateEmpty(const ASTContext &C, return createEmptyDirective<OMPMaskedDirective>(C, NumClauses, /*HasAssociatedStmt=*/true); } + +OMPGenericLoopDirective *OMPGenericLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs) { + auto *Dir = createDirective<OMPGenericLoopDirective>( + C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_loop), + StartLoc, EndLoc, CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + 
Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + return Dir; +} + +OMPGenericLoopDirective * +OMPGenericLoopDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses, + unsigned CollapsedNum, EmptyShell) { + return createEmptyDirective<OMPGenericLoopDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_loop), CollapsedNum); +} + +OMPTeamsGenericLoopDirective *OMPTeamsGenericLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs) { + auto *Dir = createDirective<OMPTeamsGenericLoopDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_teams_loop), StartLoc, EndLoc, + CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setPrevLowerBoundVariable(Exprs.PrevLB); + Dir->setPrevUpperBoundVariable(Exprs.PrevUB); + Dir->setDistInc(Exprs.DistInc); + Dir->setPrevEnsureUpperBound(Exprs.PrevEUB); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB); + Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB); + Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB); + Dir->setCombinedInit(Exprs.DistCombinedFields.Init); + Dir->setCombinedCond(Exprs.DistCombinedFields.Cond); + Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB); + Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB); + Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond); + Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond); + return Dir; +} + +OMPTeamsGenericLoopDirective * +OMPTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, EmptyShell) { + return createEmptyDirective<OMPTeamsGenericLoopDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_teams_loop), CollapsedNum); +} + +OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs, bool CanBeParallelFor) { 
+ auto *Dir = createDirective<OMPTargetTeamsGenericLoopDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_target_teams_loop), StartLoc, EndLoc, + CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setPrevLowerBoundVariable(Exprs.PrevLB); + Dir->setPrevUpperBoundVariable(Exprs.PrevUB); + Dir->setDistInc(Exprs.DistInc); + Dir->setPrevEnsureUpperBound(Exprs.PrevEUB); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB); + Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB); + Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB); + Dir->setCombinedInit(Exprs.DistCombinedFields.Init); + Dir->setCombinedCond(Exprs.DistCombinedFields.Cond); + Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB); + Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB); + Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond); + Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond); + Dir->setCanBeParallelFor(CanBeParallelFor); + return Dir; +} + +OMPTargetTeamsGenericLoopDirective * +OMPTargetTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, + EmptyShell) { + return createEmptyDirective<OMPTargetTeamsGenericLoopDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_target_teams_loop), CollapsedNum); +} + +OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs) { + auto *Dir = createDirective<OMPParallelGenericLoopDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_parallel_loop), StartLoc, EndLoc, + CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + 
Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + return Dir; +} + +OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::CreateEmpty( + const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, + EmptyShell) { + return createEmptyDirective<OMPParallelGenericLoopDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_parallel_loop), CollapsedNum); +} + +OMPTargetParallelGenericLoopDirective * +OMPTargetParallelGenericLoopDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, + const HelperExprs &Exprs) { + auto *Dir = createDirective<OMPTargetParallelGenericLoopDirective>( + C, Clauses, AssociatedStmt, + numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), StartLoc, + EndLoc, CollapsedNum); + Dir->setIterationVariable(Exprs.IterationVarRef); + Dir->setLastIteration(Exprs.LastIteration); + Dir->setCalcLastIteration(Exprs.CalcLastIteration); + Dir->setPreCond(Exprs.PreCond); + Dir->setCond(Exprs.Cond); + Dir->setInit(Exprs.Init); + Dir->setInc(Exprs.Inc); + Dir->setIsLastIterVariable(Exprs.IL); + Dir->setLowerBoundVariable(Exprs.LB); + Dir->setUpperBoundVariable(Exprs.UB); + Dir->setStrideVariable(Exprs.ST); + Dir->setEnsureUpperBound(Exprs.EUB); + Dir->setNextLowerBound(Exprs.NLB); + Dir->setNextUpperBound(Exprs.NUB); + Dir->setNumIterations(Exprs.NumIterations); + Dir->setCounters(Exprs.Counters); + Dir->setPrivateCounters(Exprs.PrivateCounters); + Dir->setInits(Exprs.Inits); + Dir->setUpdates(Exprs.Updates); + Dir->setFinals(Exprs.Finals); + Dir->setDependentCounters(Exprs.DependentCounters); + Dir->setDependentInits(Exprs.DependentInits); + Dir->setFinalsConditions(Exprs.FinalsConditions); + Dir->setPreInits(Exprs.PreInits); + return Dir; +} + +OMPTargetParallelGenericLoopDirective * +OMPTargetParallelGenericLoopDirective::CreateEmpty(const ASTContext &C, + unsigned NumClauses, + unsigned CollapsedNum, + EmptyShell) { + return createEmptyDirective<OMPTargetParallelGenericLoopDirective>( + C, NumClauses, /*HasAssociatedStmt=*/true, + numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), CollapsedNum); +} diff --git a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp index 45b15171aa97..69e0b763e8dd 100644 --- a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp +++ b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp @@ -54,6 +54,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include <cassert> +#include <optional> #include <string> using namespace clang; @@ -83,7 +84,7 @@ namespace { void PrintStmt(Stmt *S, int SubIndent) { IndentLevel += SubIndent; - if (S && isa<Expr>(S)) { + if (isa_and_nonnull<Expr>(S)) { // If this is an expr used in a stmt context, indent and newline it. Indent(); Visit(S); @@ -128,6 +129,7 @@ namespace { void PrintRawSEHFinallyStmt(SEHFinallyStmt *S); void PrintOMPExecutableDirective(OMPExecutableDirective *S, bool ForceNoStmt = false); + void PrintFPPragmas(CompoundStmt *S); void PrintExpr(Expr *E) { if (E) @@ -173,13 +175,75 @@ namespace { /// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and /// with no newline after the }. 
void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) { + assert(Node && "Compound statement cannot be null"); OS << "{" << NL; + PrintFPPragmas(Node); for (auto *I : Node->body()) PrintStmt(I); Indent() << "}"; } +void StmtPrinter::PrintFPPragmas(CompoundStmt *S) { + if (!S->hasStoredFPFeatures()) + return; + FPOptionsOverride FPO = S->getStoredFPFeatures(); + bool FEnvAccess = false; + if (FPO.hasAllowFEnvAccessOverride()) { + FEnvAccess = FPO.getAllowFEnvAccessOverride(); + Indent() << "#pragma STDC FENV_ACCESS " << (FEnvAccess ? "ON" : "OFF") + << NL; + } + if (FPO.hasSpecifiedExceptionModeOverride()) { + LangOptions::FPExceptionModeKind EM = + FPO.getSpecifiedExceptionModeOverride(); + if (!FEnvAccess || EM != LangOptions::FPE_Strict) { + Indent() << "#pragma clang fp exceptions("; + switch (FPO.getSpecifiedExceptionModeOverride()) { + default: + break; + case LangOptions::FPE_Ignore: + OS << "ignore"; + break; + case LangOptions::FPE_MayTrap: + OS << "maytrap"; + break; + case LangOptions::FPE_Strict: + OS << "strict"; + break; + } + OS << ")\n"; + } + } + if (FPO.hasConstRoundingModeOverride()) { + LangOptions::RoundingMode RM = FPO.getConstRoundingModeOverride(); + Indent() << "#pragma STDC FENV_ROUND "; + switch (RM) { + case llvm::RoundingMode::TowardZero: + OS << "FE_TOWARDZERO"; + break; + case llvm::RoundingMode::NearestTiesToEven: + OS << "FE_TONEAREST"; + break; + case llvm::RoundingMode::TowardPositive: + OS << "FE_UPWARD"; + break; + case llvm::RoundingMode::TowardNegative: + OS << "FE_DOWNWARD"; + break; + case llvm::RoundingMode::NearestTiesToAway: + OS << "FE_TONEARESTFROMZERO"; + break; + case llvm::RoundingMode::Dynamic: + OS << "FE_DYNAMIC"; + break; + default: + llvm_unreachable("Invalid rounding mode"); + } + OS << NL; + } +} + void StmtPrinter::PrintRawDecl(Decl *D) { D->print(OS, Policy, IndentLevel); } @@ -228,14 +292,33 @@ void StmtPrinter::VisitLabelStmt(LabelStmt *Node) { } void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) { - for (const auto *Attr : Node->getAttrs()) { + llvm::ArrayRef<const Attr *> Attrs = Node->getAttrs(); + for (const auto *Attr : Attrs) { Attr->printPretty(OS, Policy); + if (Attr != Attrs.back()) + OS << ' '; } PrintStmt(Node->getSubStmt(), 0); } void StmtPrinter::PrintRawIfStmt(IfStmt *If) { + if (If->isConsteval()) { + OS << "if "; + if (If->isNegatedConsteval()) + OS << "!"; + OS << "consteval"; + OS << NL; + PrintStmt(If->getThen()); + if (Stmt *Else = If->getElse()) { + Indent(); + OS << "else"; + PrintStmt(Else); + OS << NL; + } + return; + } + OS << "if ("; if (If->getInit()) PrintInitStmt(If->getInit(), 4); @@ -321,7 +404,9 @@ void StmtPrinter::VisitForStmt(ForStmt *Node) { PrintInitStmt(Node->getInit(), 5); else OS << (Node->getCond() ? 
"; " : ";"); - if (Node->getCond()) + if (const DeclStmt *DS = Node->getConditionVariableDeclStmt()) + PrintRawDeclStmt(DS); + else if (Node->getCond()) PrintExpr(Node->getCond()); OS << ";"; if (Node->getInc()) { @@ -505,13 +590,10 @@ void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) { OS << NL; } - for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) { - ObjCAtCatchStmt *catchStmt = Node->getCatchStmt(I); + for (ObjCAtCatchStmt *catchStmt : Node->catch_stmts()) { Indent() << "@catch("; - if (catchStmt->getCatchParamDecl()) { - if (Decl *DS = catchStmt->getCatchParamDecl()) - PrintRawDecl(DS); - } + if (Decl *DS = catchStmt->getCatchParamDecl()) + PrintRawDecl(DS); OS << ")"; if (auto *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) { PrintRawCompoundStmt(CS); @@ -521,8 +603,10 @@ void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) { if (auto *FS = static_cast<ObjCAtFinallyStmt *>(Node->getFinallyStmt())) { Indent() << "@finally"; - PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody())); - OS << NL; + if (auto *CS = dyn_cast<CompoundStmt>(FS->getFinallyBody())) { + PrintRawCompoundStmt(CS); + OS << NL; + } } } @@ -557,7 +641,7 @@ void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) { void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) { Indent() << "@autoreleasepool"; - PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt())); + PrintRawCompoundStmt(cast<CompoundStmt>(Node->getSubStmt())); OS << NL; } @@ -654,6 +738,11 @@ void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S, PrintStmt(S->getRawStmt()); } +void StmtPrinter::VisitOMPMetaDirective(OMPMetaDirective *Node) { + Indent() << "#pragma omp metadirective"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) { Indent() << "#pragma omp parallel"; PrintOMPExecutableDirective(Node); @@ -674,6 +763,16 @@ void StmtPrinter::VisitOMPUnrollDirective(OMPUnrollDirective *Node) { PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPReverseDirective(OMPReverseDirective *Node) { + Indent() << "#pragma omp reverse"; + PrintOMPExecutableDirective(Node); +} + +void StmtPrinter::VisitOMPInterchangeDirective(OMPInterchangeDirective *Node) { + Indent() << "#pragma omp interchange"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) { Indent() << "#pragma omp for"; PrintOMPExecutableDirective(Node); @@ -694,6 +793,11 @@ void StmtPrinter::VisitOMPSectionDirective(OMPSectionDirective *Node) { PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPScopeDirective(OMPScopeDirective *Node) { + Indent() << "#pragma omp scope"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPSingleDirective(OMPSingleDirective *Node) { Indent() << "#pragma omp single"; PrintOMPExecutableDirective(Node); @@ -731,6 +835,12 @@ void StmtPrinter::VisitOMPParallelMasterDirective( PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPParallelMaskedDirective( + OMPParallelMaskedDirective *Node) { + Indent() << "#pragma omp parallel masked"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPParallelSectionsDirective( OMPParallelSectionsDirective *Node) { Indent() << "#pragma omp parallel sections"; @@ -757,6 +867,11 @@ void StmtPrinter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *Node) { PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPErrorDirective(OMPErrorDirective 
*Node) { + Indent() << "#pragma omp error"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *Node) { Indent() << "#pragma omp taskgroup"; PrintOMPExecutableDirective(Node); @@ -856,24 +971,48 @@ void StmtPrinter::VisitOMPMasterTaskLoopDirective( PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPMaskedTaskLoopDirective( + OMPMaskedTaskLoopDirective *Node) { + Indent() << "#pragma omp masked taskloop"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPMasterTaskLoopSimdDirective( OMPMasterTaskLoopSimdDirective *Node) { Indent() << "#pragma omp master taskloop simd"; PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPMaskedTaskLoopSimdDirective( + OMPMaskedTaskLoopSimdDirective *Node) { + Indent() << "#pragma omp masked taskloop simd"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPParallelMasterTaskLoopDirective( OMPParallelMasterTaskLoopDirective *Node) { Indent() << "#pragma omp parallel master taskloop"; PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPParallelMaskedTaskLoopDirective( + OMPParallelMaskedTaskLoopDirective *Node) { + Indent() << "#pragma omp parallel masked taskloop"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPParallelMasterTaskLoopSimdDirective( OMPParallelMasterTaskLoopSimdDirective *Node) { Indent() << "#pragma omp parallel master taskloop simd"; PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPParallelMaskedTaskLoopSimdDirective( + OMPParallelMaskedTaskLoopSimdDirective *Node) { + Indent() << "#pragma omp parallel masked taskloop simd"; + PrintOMPExecutableDirective(Node); +} + void StmtPrinter::VisitOMPDistributeDirective(OMPDistributeDirective *Node) { Indent() << "#pragma omp distribute"; PrintOMPExecutableDirective(Node); @@ -982,6 +1121,64 @@ void StmtPrinter::VisitOMPMaskedDirective(OMPMaskedDirective *Node) { PrintOMPExecutableDirective(Node); } +void StmtPrinter::VisitOMPGenericLoopDirective(OMPGenericLoopDirective *Node) { + Indent() << "#pragma omp loop"; + PrintOMPExecutableDirective(Node); +} + +void StmtPrinter::VisitOMPTeamsGenericLoopDirective( + OMPTeamsGenericLoopDirective *Node) { + Indent() << "#pragma omp teams loop"; + PrintOMPExecutableDirective(Node); +} + +void StmtPrinter::VisitOMPTargetTeamsGenericLoopDirective( + OMPTargetTeamsGenericLoopDirective *Node) { + Indent() << "#pragma omp target teams loop"; + PrintOMPExecutableDirective(Node); +} + +void StmtPrinter::VisitOMPParallelGenericLoopDirective( + OMPParallelGenericLoopDirective *Node) { + Indent() << "#pragma omp parallel loop"; + PrintOMPExecutableDirective(Node); +} + +void StmtPrinter::VisitOMPTargetParallelGenericLoopDirective( + OMPTargetParallelGenericLoopDirective *Node) { + Indent() << "#pragma omp target parallel loop"; + PrintOMPExecutableDirective(Node); +} + +//===----------------------------------------------------------------------===// +// OpenACC construct printing methods +//===----------------------------------------------------------------------===// +void StmtPrinter::VisitOpenACCComputeConstruct(OpenACCComputeConstruct *S) { + Indent() << "#pragma acc " << S->getDirectiveKind(); + + if (!S->clauses().empty()) { + OS << ' '; + OpenACCClausePrinter Printer(OS, Policy); + Printer.VisitClauseList(S->clauses()); + } + OS << '\n'; + + PrintStmt(S->getStructuredBlock()); +} + +void StmtPrinter::VisitOpenACCLoopConstruct(OpenACCLoopConstruct *S) { + Indent() << "#pragma acc loop"; + + if 
(!S->clauses().empty()) { + OS << ' '; + OpenACCClausePrinter Printer(OS, Policy); + Printer.VisitClauseList(S->clauses()); + } + OS << '\n'; + + PrintStmt(S->getLoop()); +} + //===----------------------------------------------------------------------===// // Expr printing methods. //===----------------------------------------------------------------------===// @@ -990,6 +1187,10 @@ void StmtPrinter::VisitSourceLocExpr(SourceLocExpr *Node) { OS << Node->getBuiltinStr() << "()"; } +void StmtPrinter::VisitEmbedExpr(EmbedExpr *Node) { + llvm::report_fatal_error("Not implemented"); +} + void StmtPrinter::VisitConstantExpr(ConstantExpr *Node) { PrintExpr(Node->getSubExpr()); } @@ -1000,14 +1201,19 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) { return; } if (const auto *TPOD = dyn_cast<TemplateParamObjectDecl>(Node->getDecl())) { - TPOD->printAsExpr(OS); + TPOD->printAsExpr(OS, Policy); return; } if (NestedNameSpecifier *Qualifier = Node->getQualifier()) Qualifier->print(OS, Policy); if (Node->hasTemplateKeyword()) OS << "template "; - OS << Node->getNameInfo(); + if (Policy.CleanUglifiedParameters && + isa<ParmVarDecl, NonTypeTemplateParmDecl>(Node->getDecl()) && + Node->getDecl()->getIdentifier()) + OS << Node->getDecl()->getIdentifier()->deuglifiedName(); + else + Node->getNameInfo().printName(OS, Policy); if (Node->hasExplicitTemplateArgs()) { const TemplateParameterList *TPL = nullptr; if (!Node->hadMultipleCandidates()) @@ -1041,7 +1247,7 @@ void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) { static bool isImplicitSelf(const Expr *E) { if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { if (const auto *PD = dyn_cast<ImplicitParamDecl>(DRE->getDecl())) { - if (PD->getParameterKind() == ImplicitParamDecl::ObjCSelf && + if (PD->getParameterKind() == ImplicitParamKind::ObjCSelf && DRE->getBeginLoc().isInvalid()) return true; } @@ -1125,12 +1331,18 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) { bool isSigned = Node->getType()->isSignedIntegerType(); OS << toString(Node->getValue(), 10, isSigned); + if (isa<BitIntType>(Node->getType())) { + OS << (isSigned ? "wb" : "uwb"); + return; + } + // Emit suffixes. Integer literals are always a builtin integer type. switch (Node->getType()->castAs<BuiltinType>()->getKind()) { default: llvm_unreachable("Unexpected type for integer literal!"); case BuiltinType::Char_S: case BuiltinType::Char_U: OS << "i8"; break; case BuiltinType::UChar: OS << "Ui8"; break; + case BuiltinType::SChar: OS << "i8"; break; case BuiltinType::Short: OS << "i16"; break; case BuiltinType::UShort: OS << "Ui16"; break; case BuiltinType::Int: break; // no suffix. @@ -1143,6 +1355,9 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) { break; // no suffix. case BuiltinType::UInt128: break; // no suffix. + case BuiltinType::WChar_S: + case BuiltinType::WChar_U: + break; // no suffix } } @@ -1183,6 +1398,7 @@ static void PrintFloatingLiteral(raw_ostream &OS, FloatingLiteral *Node, switch (Node->getType()->castAs<BuiltinType>()->getKind()) { default: llvm_unreachable("Unexpected type for float literal!"); case BuiltinType::Half: break; // FIXME: suffix? + case BuiltinType::Ibm128: break; // FIXME: No suffix for ibm128 literal case BuiltinType::Double: break; // no suffix. case BuiltinType::Float16: OS << "F16"; break; case BuiltinType::Float: OS << 'F'; break; @@ -1259,7 +1475,7 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) { continue; // Field or identifier node. 
- IdentifierInfo *Id = ON.getFieldName(); + const IdentifierInfo *Id = ON.getFieldName(); if (!Id) continue; @@ -1298,8 +1514,12 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr( void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) { OS << "_Generic("; - PrintExpr(Node->getControllingExpr()); - for (const GenericSelectionExpr::Association Assoc : Node->associations()) { + if (Node->isExprPredicate()) + PrintExpr(Node->getControllingExpr()); + else + Node->getControllingType()->getType().print(OS, Policy); + + for (const GenericSelectionExpr::Association &Assoc : Node->associations()) { OS << ", "; QualType T = Assoc.getType(); if (T.isNull()) @@ -1329,7 +1549,7 @@ void StmtPrinter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *Node) { OS << "]"; } -void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) { +void StmtPrinter::VisitArraySectionExpr(ArraySectionExpr *Node) { PrintExpr(Node->getBase()); OS << "["; if (Node->getLowerBound()) @@ -1339,7 +1559,7 @@ void StmtPrinter::VisitOMPArraySectionExpr(OMPArraySectionExpr *Node) { if (Node->getLength()) PrintExpr(Node->getLength()); } - if (Node->getColonLocSecond().isValid()) { + if (Node->isOMPArraySection() && Node->getColonLocSecond().isValid()) { OS << ":"; if (Node->getStride()) PrintExpr(Node->getStride()); @@ -1580,7 +1800,7 @@ void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) { for (const DesignatedInitExpr::Designator &D : Node->designators()) { if (D.isFieldDesignator()) { if (D.getDotLoc().isInvalid()) { - if (IdentifierInfo *II = D.getFieldName()) { + if (const IdentifierInfo *II = D.getFieldName()) { OS << II->getName() << ":"; NeedsEquals = false; } @@ -1659,7 +1879,7 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) { case AtomicExpr::AO ## ID: \ Name = #ID "("; \ break; -#include "clang/Basic/Builtins.def" +#include "clang/Basic/Builtins.inc" } OS << Name; @@ -1667,7 +1887,9 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) { PrintExpr(Node->getPtr()); if (Node->getOp() != AtomicExpr::AO__c11_atomic_load && Node->getOp() != AtomicExpr::AO__atomic_load_n && - Node->getOp() != AtomicExpr::AO__opencl_atomic_load) { + Node->getOp() != AtomicExpr::AO__scoped_atomic_load_n && + Node->getOp() != AtomicExpr::AO__opencl_atomic_load && + Node->getOp() != AtomicExpr::AO__hip_atomic_load) { OS << ", "; PrintExpr(Node->getVal1()); } @@ -1706,21 +1928,16 @@ void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) { } } else if (Kind == OO_Arrow) { PrintExpr(Node->getArg(0)); - } else if (Kind == OO_Call) { + } else if (Kind == OO_Call || Kind == OO_Subscript) { PrintExpr(Node->getArg(0)); - OS << '('; + OS << (Kind == OO_Call ? '(' : '['); for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) { if (ArgIdx > 1) OS << ", "; if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx))) PrintExpr(Node->getArg(ArgIdx)); } - OS << ')'; - } else if (Kind == OO_Subscript) { - PrintExpr(Node->getArg(0)); - OS << '['; - PrintExpr(Node->getArg(1)); - OS << ']'; + OS << (Kind == OO_Call ? ')' : ']'); } else if (Node->getNumArgs() == 1) { OS << getOperatorSpelling(Kind) << ' '; PrintExpr(Node->getArg(0)); @@ -1736,7 +1953,7 @@ void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) { void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) { // If we have a conversion operator call only print the argument. 
CXXMethodDecl *MD = Node->getMethodDecl(); - if (MD && isa<CXXConversionDecl>(MD)) { + if (isa_and_nonnull<CXXConversionDecl>(MD)) { PrintExpr(Node->getImplicitObjectArgument()); return; } @@ -1847,7 +2064,7 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) { cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs(); assert(Args); - if (Args->size() != 1) { + if (Args->size() != 1 || Args->get(0).getKind() != TemplateArgument::Pack) { const TemplateParameterList *TPL = nullptr; if (!DRE->hadMultipleCandidates()) if (const auto *TD = dyn_cast<TemplateDecl>(DRE->getDecl())) @@ -1915,14 +2132,23 @@ void StmtPrinter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Node) { } void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) { - Node->getType().print(OS, Policy); - // If there are no parens, this is list-initialization, and the braces are - // part of the syntax of the inner construct. - if (Node->getLParenLoc().isValid()) - OS << "("; + auto TargetType = Node->getType(); + auto *Auto = TargetType->getContainedDeducedType(); + bool Bare = Auto && Auto->isDeduced(); + + // Parenthesize deduced casts. + if (Bare) + OS << '('; + TargetType.print(OS, Policy); + if (Bare) + OS << ')'; + + // No extra braces surrounding the inner construct. + if (!Node->isListInitialization()) + OS << '('; PrintExpr(Node->getSubExpr()); - if (Node->getLParenLoc().isValid()) - OS << ")"; + if (!Node->isListInitialization()) + OS << ')'; } void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) { @@ -2009,7 +2235,8 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) { OS << "..."; if (Node->isInitCapture(C)) { - VarDecl *D = C->getCapturedVar(); + // Init captures are always VarDecl. + auto *D = cast<VarDecl>(C->getCapturedVar()); llvm::StringRef Pre; llvm::StringRef Post; @@ -2044,7 +2271,10 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) { } else { NeedComma = true; } - std::string ParamStr = P->getNameAsString(); + std::string ParamStr = + (Policy.CleanUglifiedParameters && P->getIdentifier()) + ? 
P->getIdentifier()->deuglifiedName().str() + : P->getNameAsString(); P->getOriginalType().print(OS, Policy, ParamStr); } if (Method->isVariadic()) { @@ -2104,10 +2334,10 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) { if (E->isParenTypeId()) OS << "("; std::string TypeS; - if (Optional<Expr *> Size = E->getArraySize()) { + if (E->isArray()) { llvm::raw_string_ostream s(TypeS); s << '['; - if (*Size) + if (std::optional<Expr *> Size = E->getArraySize()) (*Size)->printPretty(s, Helper, Policy); s << ']'; } @@ -2115,12 +2345,14 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) { if (E->isParenTypeId()) OS << ")"; - CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle(); - if (InitStyle) { - if (InitStyle == CXXNewExpr::CallInit) + CXXNewInitializationStyle InitStyle = E->getInitializationStyle(); + if (InitStyle != CXXNewInitializationStyle::None) { + bool Bare = InitStyle == CXXNewInitializationStyle::Parens && + !isa<ParenListExpr>(E->getInitializer()); + if (Bare) OS << "("; PrintExpr(E->getInitializer()); - if (InitStyle == CXXNewExpr::CallInit) + if (Bare) OS << ")"; } } @@ -2144,7 +2376,7 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { E->getQualifier()->print(OS, Policy); OS << "~"; - if (IdentifierInfo *II = E->getDestroyedTypeIdentifier()) + if (const IdentifierInfo *II = E->getDestroyedTypeIdentifier()) OS << II->getName(); else E->getDestroyedType().print(OS, Policy); @@ -2182,19 +2414,19 @@ void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) { PrintExpr(E->getSubExpr()); } -void -StmtPrinter::VisitCXXUnresolvedConstructExpr( - CXXUnresolvedConstructExpr *Node) { +void StmtPrinter::VisitCXXUnresolvedConstructExpr( + CXXUnresolvedConstructExpr *Node) { Node->getTypeAsWritten().print(OS, Policy); - OS << "("; - for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(), - ArgEnd = Node->arg_end(); - Arg != ArgEnd; ++Arg) { + if (!Node->isListInitialization()) + OS << '('; + for (auto Arg = Node->arg_begin(), ArgEnd = Node->arg_end(); Arg != ArgEnd; + ++Arg) { if (Arg != Node->arg_begin()) OS << ", "; PrintExpr(*Arg); } - OS << ")"; + if (!Node->isListInitialization()) + OS << ')'; } void StmtPrinter::VisitCXXDependentScopeMemberExpr( @@ -2263,6 +2495,10 @@ void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) { OS << "sizeof...(" << *E->getPack() << ")"; } +void StmtPrinter::VisitPackIndexingExpr(PackIndexingExpr *E) { + OS << E->getPackIdExpression() << "...[" << E->getIndexExpr() << "]"; +} + void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr( SubstNonTypeTemplateParmPackExpr *Node) { OS << *Node->getParameterPack(); @@ -2295,6 +2531,13 @@ void StmtPrinter::VisitCXXFoldExpr(CXXFoldExpr *E) { OS << ")"; } +void StmtPrinter::VisitCXXParenListInitExpr(CXXParenListInitExpr *Node) { + OS << "("; + llvm::interleaveComma(Node->getInitExprs(), OS, + [&](Expr *E) { PrintExpr(E); }); + OS << ")"; +} + void StmtPrinter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) { NestedNameSpecifierLoc NNS = E->getNestedNameSpecifierLoc(); if (NNS) @@ -2351,7 +2594,7 @@ void StmtPrinter::VisitRequiresExpr(RequiresExpr *E) { } else { auto *NestedReq = cast<concepts::NestedRequirement>(Req); OS << "requires "; - if (NestedReq->isSubstitutionFailure()) + if (NestedReq->hasInvalidConstraint()) OS << "<<error-expression>>"; else PrintExpr(NestedReq->getConstraintExpr()); @@ -2361,7 +2604,7 @@ void StmtPrinter::VisitRequiresExpr(RequiresExpr *E) { OS << "}"; } -// C++ Coroutines TS +// C++ Coroutines void 
StmtPrinter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) { Visit(S->getBody()); @@ -2571,6 +2814,14 @@ void Stmt::printPretty(raw_ostream &Out, PrinterHelper *Helper, P.Visit(const_cast<Stmt *>(this)); } +void Stmt::printPrettyControlled(raw_ostream &Out, PrinterHelper *Helper, + const PrintingPolicy &Policy, + unsigned Indentation, StringRef NL, + const ASTContext *Context) const { + StmtPrinter P(Out, Helper, Policy, Indentation, NL, Context); + P.PrintControlledStmt(const_cast<Stmt *>(this)); +} + void Stmt::printJson(raw_ostream &Out, PrinterHelper *Helper, const PrintingPolicy &Policy, bool AddQuotes) const { std::string Buf; diff --git a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp index ed000c2467fa..89d2a422509d 100644 --- a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp +++ b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp @@ -29,15 +29,21 @@ namespace { protected: llvm::FoldingSetNodeID &ID; bool Canonical; + bool ProfileLambdaExpr; public: - StmtProfiler(llvm::FoldingSetNodeID &ID, bool Canonical) - : ID(ID), Canonical(Canonical) {} + StmtProfiler(llvm::FoldingSetNodeID &ID, bool Canonical, + bool ProfileLambdaExpr) + : ID(ID), Canonical(Canonical), ProfileLambdaExpr(ProfileLambdaExpr) {} virtual ~StmtProfiler() {} void VisitStmt(const Stmt *S); + void VisitStmtNoChildren(const Stmt *S) { + HandleStmtClass(S->getStmtClass()); + } + virtual void HandleStmtClass(Stmt::StmtClass SC) = 0; #define STMT(Node, Base) void Visit##Node(const Node *S); @@ -55,7 +61,7 @@ namespace { virtual void VisitName(DeclarationName Name, bool TreatAsDecl = false) = 0; /// Visit identifiers that are not in Decl's or Type's. - virtual void VisitIdentifierInfo(IdentifierInfo *II) = 0; + virtual void VisitIdentifierInfo(const IdentifierInfo *II) = 0; /// Visit a nested-name-specifier that occurs within an expression /// or statement. @@ -79,8 +85,10 @@ namespace { public: StmtProfilerWithPointers(llvm::FoldingSetNodeID &ID, - const ASTContext &Context, bool Canonical) - : StmtProfiler(ID, Canonical), Context(Context) {} + const ASTContext &Context, bool Canonical, + bool ProfileLambdaExpr) + : StmtProfiler(ID, Canonical, ProfileLambdaExpr), Context(Context) {} + private: void HandleStmtClass(Stmt::StmtClass SC) override { ID.AddInteger(SC); @@ -95,7 +103,15 @@ namespace { ID.AddInteger(NTTP->getDepth()); ID.AddInteger(NTTP->getIndex()); ID.AddBoolean(NTTP->isParameterPack()); - VisitType(NTTP->getType()); + // C++20 [temp.over.link]p6: + // Two template-parameters are equivalent under the following + // conditions: [...] if they declare non-type template parameters, + // they have equivalent types ignoring the use of type-constraints + // for placeholder types + // + // TODO: Why do we need to include the type in the profile? It's not + // part of the mangling. + VisitType(Context.getUnconstrainedType(NTTP->getType())); return; } @@ -107,6 +123,9 @@ namespace { // definition of "equivalent" (per C++ [temp.over.link]) is at // least as strong as the definition of "equivalent" used for // name mangling. + // + // TODO: The Itanium C++ ABI only uses the top-level cv-qualifiers, + // not the entirety of the type. 
VisitType(Parm->getType()); ID.AddInteger(Parm->getFunctionScopeDepth()); ID.AddInteger(Parm->getFunctionScopeIndex()); @@ -144,7 +163,7 @@ namespace { ID.AddPointer(Name.getAsOpaquePtr()); } - void VisitIdentifierInfo(IdentifierInfo *II) override { + void VisitIdentifierInfo(const IdentifierInfo *II) override { ID.AddPointer(II); } @@ -166,7 +185,8 @@ namespace { ODRHash &Hash; public: StmtProfilerWithoutPointers(llvm::FoldingSetNodeID &ID, ODRHash &Hash) - : StmtProfiler(ID, false), Hash(Hash) {} + : StmtProfiler(ID, /*Canonical=*/false, /*ProfileLambdaExpr=*/false), + Hash(Hash) {} private: void HandleStmtClass(Stmt::StmtClass SC) override { @@ -191,7 +211,7 @@ namespace { } Hash.AddDeclarationName(Name, TreatAsDecl); } - void VisitIdentifierInfo(IdentifierInfo *II) override { + void VisitIdentifierInfo(const IdentifierInfo *II) override { ID.AddBoolean(II); if (II) { Hash.AddIdentifierInfo(II); @@ -218,7 +238,7 @@ namespace { void StmtProfiler::VisitStmt(const Stmt *S) { assert(S && "Requires non-null Stmt pointer"); - HandleStmtClass(S->getStmtClass()); + VisitStmtNoChildren(S); for (const Stmt *SubStmt : S->children()) { if (SubStmt) @@ -452,6 +472,11 @@ void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) { Profiler->VisitStmt(C->getNumThreads()); } +void OMPClauseProfiler::VisitOMPAlignClause(const OMPAlignClause *C) { + if (C->getAlignment()) + Profiler->VisitStmt(C->getAlignment()); +} + void OMPClauseProfiler::VisitOMPSafelenClause(const OMPSafelenClause *C) { if (C->getSafelen()) Profiler->VisitStmt(C->getSafelen()); @@ -463,7 +488,7 @@ void OMPClauseProfiler::VisitOMPSimdlenClause(const OMPSimdlenClause *C) { } void OMPClauseProfiler::VisitOMPSizesClause(const OMPSizesClause *C) { - for (auto E : C->getSizesRefs()) + for (auto *E : C->getSizesRefs()) if (E) Profiler->VisitExpr(E); } @@ -521,6 +546,15 @@ void OMPClauseProfiler::VisitOMPDynamicAllocatorsClause( void OMPClauseProfiler::VisitOMPAtomicDefaultMemOrderClause( const OMPAtomicDefaultMemOrderClause *C) {} +void OMPClauseProfiler::VisitOMPAtClause(const OMPAtClause *C) {} + +void OMPClauseProfiler::VisitOMPSeverityClause(const OMPSeverityClause *C) {} + +void OMPClauseProfiler::VisitOMPMessageClause(const OMPMessageClause *C) { + if (C->getMessageString()) + Profiler->VisitStmt(C->getMessageString()); +} + void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) { VistOMPClauseWithPreInit(C); if (auto *S = C->getChunkSize()) @@ -546,6 +580,10 @@ void OMPClauseProfiler::VisitOMPUpdateClause(const OMPUpdateClause *) {} void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {} +void OMPClauseProfiler::VisitOMPCompareClause(const OMPCompareClause *) {} + +void OMPClauseProfiler::VisitOMPFailClause(const OMPFailClause *) {} + void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {} void OMPClauseProfiler::VisitOMPAcqRelClause(const OMPAcqRelClause *) {} @@ -556,6 +594,8 @@ void OMPClauseProfiler::VisitOMPReleaseClause(const OMPReleaseClause *) {} void OMPClauseProfiler::VisitOMPRelaxedClause(const OMPRelaxedClause *) {} +void OMPClauseProfiler::VisitOMPWeakClause(const OMPWeakClause *) {} + void OMPClauseProfiler::VisitOMPThreadsClause(const OMPThreadsClause *) {} void OMPClauseProfiler::VisitOMPSIMDClause(const OMPSIMDClause *) {} @@ -850,6 +890,10 @@ void OMPClauseProfiler::VisitOMPIsDevicePtrClause( const OMPIsDevicePtrClause *C) { VisitOMPClauseList(C); } +void OMPClauseProfiler::VisitOMPHasDeviceAddrClause( + const OMPHasDeviceAddrClause *C) { + 
VisitOMPClauseList(C); +} void OMPClauseProfiler::VisitOMPNontemporalClause( const OMPNontemporalClause *C) { VisitOMPClauseList(C); @@ -878,6 +922,19 @@ void OMPClauseProfiler::VisitOMPAffinityClause(const OMPAffinityClause *C) { Profiler->VisitStmt(E); } void OMPClauseProfiler::VisitOMPOrderClause(const OMPOrderClause *C) {} +void OMPClauseProfiler::VisitOMPBindClause(const OMPBindClause *C) {} +void OMPClauseProfiler::VisitOMPXDynCGroupMemClause( + const OMPXDynCGroupMemClause *C) { + VistOMPClauseWithPreInit(C); + if (Expr *Size = C->getSize()) + Profiler->VisitStmt(Size); +} +void OMPClauseProfiler::VisitOMPDoacrossClause(const OMPDoacrossClause *C) { + VisitOMPClauseList(C); +} +void OMPClauseProfiler::VisitOMPXAttributeClause(const OMPXAttributeClause *C) { +} +void OMPClauseProfiler::VisitOMPXBareClause(const OMPXBareClause *C) {} } // namespace void @@ -903,6 +960,10 @@ void StmtProfiler::VisitOMPLoopDirective(const OMPLoopDirective *S) { VisitOMPLoopBasedDirective(S); } +void StmtProfiler::VisitOMPMetaDirective(const OMPMetaDirective *S) { + VisitOMPExecutableDirective(S); +} + void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) { VisitOMPExecutableDirective(S); } @@ -911,12 +972,26 @@ void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) { VisitOMPLoopDirective(S); } -void StmtProfiler::VisitOMPTileDirective(const OMPTileDirective *S) { +void StmtProfiler::VisitOMPLoopTransformationDirective( + const OMPLoopTransformationDirective *S) { VisitOMPLoopBasedDirective(S); } +void StmtProfiler::VisitOMPTileDirective(const OMPTileDirective *S) { + VisitOMPLoopTransformationDirective(S); +} + void StmtProfiler::VisitOMPUnrollDirective(const OMPUnrollDirective *S) { - VisitOMPLoopBasedDirective(S); + VisitOMPLoopTransformationDirective(S); +} + +void StmtProfiler::VisitOMPReverseDirective(const OMPReverseDirective *S) { + VisitOMPLoopTransformationDirective(S); +} + +void StmtProfiler::VisitOMPInterchangeDirective( + const OMPInterchangeDirective *S) { + VisitOMPLoopTransformationDirective(S); } void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) { @@ -935,6 +1010,10 @@ void StmtProfiler::VisitOMPSectionDirective(const OMPSectionDirective *S) { VisitOMPExecutableDirective(S); } +void StmtProfiler::VisitOMPScopeDirective(const OMPScopeDirective *S) { + VisitOMPExecutableDirective(S); +} + void StmtProfiler::VisitOMPSingleDirective(const OMPSingleDirective *S) { VisitOMPExecutableDirective(S); } @@ -963,6 +1042,11 @@ void StmtProfiler::VisitOMPParallelMasterDirective( VisitOMPExecutableDirective(S); } +void StmtProfiler::VisitOMPParallelMaskedDirective( + const OMPParallelMaskedDirective *S) { + VisitOMPExecutableDirective(S); +} + void StmtProfiler::VisitOMPParallelSectionsDirective( const OMPParallelSectionsDirective *S) { VisitOMPExecutableDirective(S); @@ -984,6 +1068,9 @@ void StmtProfiler::VisitOMPTaskwaitDirective(const OMPTaskwaitDirective *S) { VisitOMPExecutableDirective(S); } +void StmtProfiler::VisitOMPErrorDirective(const OMPErrorDirective *S) { + VisitOMPExecutableDirective(S); +} void StmtProfiler::VisitOMPTaskgroupDirective(const OMPTaskgroupDirective *S) { VisitOMPExecutableDirective(S); if (const Expr *E = S->getReductionRef()) @@ -1065,21 +1152,41 @@ void StmtProfiler::VisitOMPMasterTaskLoopDirective( VisitOMPLoopDirective(S); } +void StmtProfiler::VisitOMPMaskedTaskLoopDirective( + const OMPMaskedTaskLoopDirective *S) { + VisitOMPLoopDirective(S); +} + void StmtProfiler::VisitOMPMasterTaskLoopSimdDirective( const 
OMPMasterTaskLoopSimdDirective *S) { VisitOMPLoopDirective(S); } +void StmtProfiler::VisitOMPMaskedTaskLoopSimdDirective( + const OMPMaskedTaskLoopSimdDirective *S) { + VisitOMPLoopDirective(S); +} + void StmtProfiler::VisitOMPParallelMasterTaskLoopDirective( const OMPParallelMasterTaskLoopDirective *S) { VisitOMPLoopDirective(S); } +void StmtProfiler::VisitOMPParallelMaskedTaskLoopDirective( + const OMPParallelMaskedTaskLoopDirective *S) { + VisitOMPLoopDirective(S); +} + void StmtProfiler::VisitOMPParallelMasterTaskLoopSimdDirective( const OMPParallelMasterTaskLoopSimdDirective *S) { VisitOMPLoopDirective(S); } +void StmtProfiler::VisitOMPParallelMaskedTaskLoopSimdDirective( + const OMPParallelMaskedTaskLoopSimdDirective *S) { + VisitOMPLoopDirective(S); +} + void StmtProfiler::VisitOMPDistributeDirective( const OMPDistributeDirective *S) { VisitOMPLoopDirective(S); @@ -1181,6 +1288,31 @@ void StmtProfiler::VisitOMPMaskedDirective(const OMPMaskedDirective *S) { VisitOMPExecutableDirective(S); } +void StmtProfiler::VisitOMPGenericLoopDirective( + const OMPGenericLoopDirective *S) { + VisitOMPLoopDirective(S); +} + +void StmtProfiler::VisitOMPTeamsGenericLoopDirective( + const OMPTeamsGenericLoopDirective *S) { + VisitOMPLoopDirective(S); +} + +void StmtProfiler::VisitOMPTargetTeamsGenericLoopDirective( + const OMPTargetTeamsGenericLoopDirective *S) { + VisitOMPLoopDirective(S); +} + +void StmtProfiler::VisitOMPParallelGenericLoopDirective( + const OMPParallelGenericLoopDirective *S) { + VisitOMPLoopDirective(S); +} + +void StmtProfiler::VisitOMPTargetParallelGenericLoopDirective( + const OMPTargetParallelGenericLoopDirective *S) { + VisitOMPLoopDirective(S); +} + void StmtProfiler::VisitExpr(const Expr *S) { VisitStmt(S); } @@ -1209,13 +1341,21 @@ void StmtProfiler::VisitSYCLUniqueStableNameExpr( void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) { VisitExpr(S); - ID.AddInteger(S->getIdentKind()); + ID.AddInteger(llvm::to_underlying(S->getIdentKind())); } void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) { VisitExpr(S); S->getValue().Profile(ID); - ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind()); + + QualType T = S->getType(); + if (Canonical) + T = T.getCanonicalType(); + ID.AddInteger(T->getTypeClass()); + if (auto BitIntT = T->getAs<BitIntType>()) + BitIntT->Profile(ID); + else + ID.AddInteger(T->castAs<BuiltinType>()->getKind()); } void StmtProfiler::VisitFixedPointLiteral(const FixedPointLiteral *S) { @@ -1226,7 +1366,7 @@ void StmtProfiler::VisitFixedPointLiteral(const FixedPointLiteral *S) { void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) { VisitExpr(S); - ID.AddInteger(S->getKind()); + ID.AddInteger(llvm::to_underlying(S->getKind())); ID.AddInteger(S->getValue()); } @@ -1244,7 +1384,7 @@ void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) { void StmtProfiler::VisitStringLiteral(const StringLiteral *S) { VisitExpr(S); ID.AddString(S->getBytes()); - ID.AddInteger(S->getKind()); + ID.AddInteger(llvm::to_underlying(S->getKind())); } void StmtProfiler::VisitParenExpr(const ParenExpr *S) { @@ -1304,7 +1444,7 @@ void StmtProfiler::VisitMatrixSubscriptExpr(const MatrixSubscriptExpr *S) { VisitExpr(S); } -void StmtProfiler::VisitOMPArraySectionExpr(const OMPArraySectionExpr *S) { +void StmtProfiler::VisitArraySectionExpr(const ArraySectionExpr *S) { VisitExpr(S); } @@ -1426,7 +1566,7 @@ void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) { assert(D.isArrayRangeDesignator()); ID.AddInteger(2); } 
- ID.AddInteger(D.getFirstExprIndex()); + ID.AddInteger(D.getArrayIndex()); } } @@ -1535,8 +1675,8 @@ void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) { } else { ID.AddInteger(concepts::Requirement::RK_Nested); auto *NestedReq = cast<concepts::NestedRequirement>(Req); - ID.AddBoolean(NestedReq->isSubstitutionFailure()); - if (!NestedReq->isSubstitutionFailure()) + ID.AddBoolean(NestedReq->hasInvalidConstraint()); + if (!NestedReq->hasInvalidConstraint()) Visit(NestedReq->getConstraintExpr()); } } @@ -1544,7 +1684,8 @@ void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) { static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S, UnaryOperatorKind &UnaryOp, - BinaryOperatorKind &BinaryOp) { + BinaryOperatorKind &BinaryOp, + unsigned &NumArgs) { switch (S->getOperator()) { case OO_None: case OO_New: @@ -1557,7 +1698,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S, llvm_unreachable("Invalid operator call kind"); case OO_Plus: - if (S->getNumArgs() == 1) { + if (NumArgs == 1) { UnaryOp = UO_Plus; return Stmt::UnaryOperatorClass; } @@ -1566,7 +1707,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S, return Stmt::BinaryOperatorClass; case OO_Minus: - if (S->getNumArgs() == 1) { + if (NumArgs == 1) { UnaryOp = UO_Minus; return Stmt::UnaryOperatorClass; } @@ -1575,7 +1716,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S, return Stmt::BinaryOperatorClass; case OO_Star: - if (S->getNumArgs() == 1) { + if (NumArgs == 1) { UnaryOp = UO_Deref; return Stmt::UnaryOperatorClass; } @@ -1596,7 +1737,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S, return Stmt::BinaryOperatorClass; case OO_Amp: - if (S->getNumArgs() == 1) { + if (NumArgs == 1) { UnaryOp = UO_AddrOf; return Stmt::UnaryOperatorClass; } @@ -1705,13 +1846,13 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S, return Stmt::BinaryOperatorClass; case OO_PlusPlus: - UnaryOp = S->getNumArgs() == 1? UO_PreInc - : UO_PostInc; + UnaryOp = NumArgs == 1 ? UO_PreInc : UO_PostInc; + NumArgs = 1; return Stmt::UnaryOperatorClass; case OO_MinusMinus: - UnaryOp = S->getNumArgs() == 1? UO_PreDec - : UO_PostDec; + UnaryOp = NumArgs == 1 ? 
UO_PreDec : UO_PostDec; + NumArgs = 1; return Stmt::UnaryOperatorClass; case OO_Comma: @@ -1757,10 +1898,11 @@ void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) { UnaryOperatorKind UnaryOp = UO_Extension; BinaryOperatorKind BinaryOp = BO_Comma; - Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp); + unsigned NumArgs = S->getNumArgs(); + Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp, NumArgs); ID.AddInteger(SC); - for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I) + for (unsigned I = 0; I != NumArgs; ++I) Visit(S->getArg(I)); if (SC == Stmt::UnaryOperatorClass) ID.AddInteger(UnaryOp); @@ -1878,6 +2020,7 @@ void StmtProfiler::VisitMSPropertySubscriptExpr( void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) { VisitExpr(S); ID.AddBoolean(S->isImplicit()); + ID.AddBoolean(S->isCapturedByCopyInLambdaWithExplicitObjectParameter()); } void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) { @@ -1923,31 +2066,45 @@ StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) { void StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) { - VisitExpr(S); - for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(), - CEnd = S->explicit_capture_end(); - C != CEnd; ++C) { - if (C->capturesVLAType()) + if (!ProfileLambdaExpr) { + // Do not recursively visit the children of this expression. Profiling the + // body would result in unnecessary work, and is not safe to do during + // deserialization. + VisitStmtNoChildren(S); + + // C++20 [temp.over.link]p5: + // Two lambda-expressions are never considered equivalent. + VisitDecl(S->getLambdaClass()); + + return; + } + + CXXRecordDecl *Lambda = S->getLambdaClass(); + for (const auto &Capture : Lambda->captures()) { + ID.AddInteger(Capture.getCaptureKind()); + if (Capture.capturesVariable()) + VisitDecl(Capture.getCapturedVar()); + } + + // Profiling the body of the lambda may be dangerous during deserialization. + // So we'd like only to profile the signature here. + ODRHash Hasher; + // FIXME: We can't get the operator call easily by + // `CXXRecordDecl::getLambdaCallOperator()` if we're in deserialization. + // So we have to do something raw here. + for (auto *SubDecl : Lambda->decls()) { + FunctionDecl *Call = nullptr; + if (auto *FTD = dyn_cast<FunctionTemplateDecl>(SubDecl)) + Call = FTD->getTemplatedDecl(); + else if (auto *FD = dyn_cast<FunctionDecl>(SubDecl)) + Call = FD; + + if (!Call) continue; - ID.AddInteger(C->getCaptureKind()); - switch (C->getCaptureKind()) { - case LCK_StarThis: - case LCK_This: - break; - case LCK_ByRef: - case LCK_ByCopy: - VisitDecl(C->getCapturedVar()); - ID.AddBoolean(C->isPackExpansion()); - break; - case LCK_VLAType: - llvm_unreachable("VLA type in explicit captures."); - } + Hasher.AddFunctionDecl(Call, /*SkipBody=*/true); } - // Note: If we actually needed to be able to match lambda - // expressions, we would have to consider parameters and return type - // here, among other things. 
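// Illustrative sketch (not part of the diff above): why DecodeOperatorCall now
// clamps NumArgs to 1 for OO_PlusPlus/OO_MinusMinus. A postfix increment is
// parsed as a two-argument operator call (the dummy int selects the postfix
// form), but it has only one real operand, so only that operand should feed
// the profile. 'Counter' is a made-up example type.
struct Counter {
  int v = 0;
  Counter &operator++() { ++v; return *this; }                  // ++c: one argument, decoded as UO_PreInc
  Counter operator++(int) { Counter t = *this; ++v; return t; } // c++: two arguments, decoded as UO_PostInc
};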
- VisitStmt(S->getBody()); + ID.AddInteger(Hasher.CalculateHash()); } void @@ -1971,7 +2128,7 @@ void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) { ID.AddInteger(S->getNumPlacementArgs()); ID.AddBoolean(S->isGlobalNew()); ID.AddBoolean(S->isParenTypeId()); - ID.AddInteger(S->getInitializationStyle()); + ID.AddInteger(llvm::to_underlying(S->getInitializationStyle())); } void @@ -2092,6 +2249,12 @@ void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) { } } +void StmtProfiler::VisitPackIndexingExpr(const PackIndexingExpr *E) { + VisitExpr(E); + VisitExpr(E->getPackIdExpression()); + VisitExpr(E->getIndexExpr()); +} + void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr( const SubstNonTypeTemplateParmPackExpr *S) { VisitExpr(S); @@ -2123,6 +2286,10 @@ void StmtProfiler::VisitCXXFoldExpr(const CXXFoldExpr *S) { ID.AddInteger(S->getOperator()); } +void StmtProfiler::VisitCXXParenListInitExpr(const CXXParenListInitExpr *S) { + VisitExpr(S); +} + void StmtProfiler::VisitCoroutineBodyStmt(const CoroutineBodyStmt *S) { VisitStmt(S); } @@ -2155,6 +2322,8 @@ void StmtProfiler::VisitSourceLocExpr(const SourceLocExpr *E) { VisitExpr(E); } +void StmtProfiler::VisitEmbedExpr(const EmbedExpr *E) { VisitExpr(E); } + void StmtProfiler::VisitRecoveryExpr(const RecoveryExpr *E) { VisitExpr(E); } void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) { @@ -2285,6 +2454,12 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) { Arg.getAsIntegral().Profile(ID); break; + case TemplateArgument::StructuralValue: + VisitType(Arg.getStructuralValueType()); + // FIXME: Do we need to recursively decompose this ourselves? + Arg.getAsStructuralValue().Profile(ID); + break; + case TemplateArgument::Expression: Visit(Arg.getAsExpr()); break; @@ -2296,9 +2471,169 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) { } } +namespace { +class OpenACCClauseProfiler + : public OpenACCClauseVisitor<OpenACCClauseProfiler> { + StmtProfiler &Profiler; + +public: + OpenACCClauseProfiler(StmtProfiler &P) : Profiler(P) {} + + void VisitOpenACCClauseList(ArrayRef<const OpenACCClause *> Clauses) { + for (const OpenACCClause *Clause : Clauses) { + // TODO OpenACC: When we have clauses with expressions, we should + // profile them too. + Visit(Clause); + } + } + +#define VISIT_CLAUSE(CLAUSE_NAME) \ + void Visit##CLAUSE_NAME##Clause(const OpenACC##CLAUSE_NAME##Clause &Clause); + +#include "clang/Basic/OpenACCClauses.def" +}; + +/// Nothing to do here, there are no sub-statements. 
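// Illustrative sketch (not part of the diff above): the LambdaExpr profiling
// rewrite leans on C++20 [temp.over.link]p5 ("two lambda-expressions are never
// considered equivalent"). Even textually identical lambdas have distinct
// closure types, which is why recording the closure class and the call
// operator's signature hash suffices instead of visiting the body.
#include <type_traits>
auto lambda_a = [] { return 1; };
auto lambda_b = [] { return 1; };
static_assert(!std::is_same_v<decltype(lambda_a), decltype(lambda_b)>);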
+void OpenACCClauseProfiler::VisitDefaultClause( + const OpenACCDefaultClause &Clause) {} + +void OpenACCClauseProfiler::VisitIfClause(const OpenACCIfClause &Clause) { + assert(Clause.hasConditionExpr() && + "if clause requires a valid condition expr"); + Profiler.VisitStmt(Clause.getConditionExpr()); +} + +void OpenACCClauseProfiler::VisitCopyClause(const OpenACCCopyClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} +void OpenACCClauseProfiler::VisitCopyInClause( + const OpenACCCopyInClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitCopyOutClause( + const OpenACCCopyOutClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitCreateClause( + const OpenACCCreateClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitSelfClause(const OpenACCSelfClause &Clause) { + if (Clause.hasConditionExpr()) + Profiler.VisitStmt(Clause.getConditionExpr()); +} + +void OpenACCClauseProfiler::VisitNumGangsClause( + const OpenACCNumGangsClause &Clause) { + for (auto *E : Clause.getIntExprs()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitNumWorkersClause( + const OpenACCNumWorkersClause &Clause) { + assert(Clause.hasIntExpr() && "num_workers clause requires a valid int expr"); + Profiler.VisitStmt(Clause.getIntExpr()); +} + +void OpenACCClauseProfiler::VisitPrivateClause( + const OpenACCPrivateClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitFirstPrivateClause( + const OpenACCFirstPrivateClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitAttachClause( + const OpenACCAttachClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitDevicePtrClause( + const OpenACCDevicePtrClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitNoCreateClause( + const OpenACCNoCreateClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitPresentClause( + const OpenACCPresentClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} + +void OpenACCClauseProfiler::VisitVectorLengthClause( + const OpenACCVectorLengthClause &Clause) { + assert(Clause.hasIntExpr() && + "vector_length clause requires a valid int expr"); + Profiler.VisitStmt(Clause.getIntExpr()); +} + +void OpenACCClauseProfiler::VisitAsyncClause(const OpenACCAsyncClause &Clause) { + if (Clause.hasIntExpr()) + Profiler.VisitStmt(Clause.getIntExpr()); +} + +void OpenACCClauseProfiler::VisitWaitClause(const OpenACCWaitClause &Clause) { + if (Clause.hasDevNumExpr()) + Profiler.VisitStmt(Clause.getDevNumExpr()); + for (auto *E : Clause.getQueueIdExprs()) + Profiler.VisitStmt(E); +} +/// Nothing to do here, there are no sub-statements. 
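// Illustrative sketch (not part of the diff above): the clause visitors in
// this region simply walk the var-list or int expressions attached to each
// clause. Those clauses originate from OpenACC directives such as the one
// below (standard OpenACC syntax; whether a given clang release accepts every
// clause here depends on how far its OpenACC support has progressed).
void saxpy(int n, float a, const float *x, float *y) {
#pragma acc parallel loop copyin(x[0:n]) copy(y[0:n]) if(n > 0) num_gangs(8)
  for (int i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}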
+void OpenACCClauseProfiler::VisitDeviceTypeClause( + const OpenACCDeviceTypeClause &Clause) {} + +void OpenACCClauseProfiler::VisitAutoClause(const OpenACCAutoClause &Clause) {} + +void OpenACCClauseProfiler::VisitIndependentClause( + const OpenACCIndependentClause &Clause) {} + +void OpenACCClauseProfiler::VisitSeqClause(const OpenACCSeqClause &Clause) {} + +void OpenACCClauseProfiler::VisitReductionClause( + const OpenACCReductionClause &Clause) { + for (auto *E : Clause.getVarList()) + Profiler.VisitStmt(E); +} +} // namespace + +void StmtProfiler::VisitOpenACCComputeConstruct( + const OpenACCComputeConstruct *S) { + // VisitStmt handles children, so the AssociatedStmt is handled. + VisitStmt(S); + + OpenACCClauseProfiler P{*this}; + P.VisitOpenACCClauseList(S->clauses()); +} + +void StmtProfiler::VisitOpenACCLoopConstruct(const OpenACCLoopConstruct *S) { + // VisitStmt handles children, so the Loop is handled. + VisitStmt(S); + + OpenACCClauseProfiler P{*this}; + P.VisitOpenACCClauseList(S->clauses()); +} + void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, - bool Canonical) const { - StmtProfilerWithPointers Profiler(ID, Context, Canonical); + bool Canonical, bool ProfileLambdaExpr) const { + StmtProfilerWithPointers Profiler(ID, Context, Canonical, ProfileLambdaExpr); Profiler.Visit(this); } diff --git a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp index f44230d1bd03..2e6839e948d9 100644 --- a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp +++ b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp @@ -29,7 +29,6 @@ #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/FoldingSet.h" -#include "llvm/ADT/None.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" @@ -41,6 +40,7 @@ #include <cstddef> #include <cstdint> #include <cstring> +#include <optional> using namespace clang; @@ -59,15 +59,17 @@ static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out, const Type *T = TemplArg.getIntegralType().getTypePtr(); const llvm::APSInt &Val = TemplArg.getAsIntegral(); - if (const EnumType *ET = T->getAs<EnumType>()) { - for (const EnumConstantDecl* ECD : ET->getDecl()->enumerators()) { - // In Sema::CheckTemplateArugment, enum template arguments value are - // extended to the size of the integer underlying the enum type. This - // may create a size difference between the enum value and template - // argument value, requiring isSameValue here instead of operator==. - if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) { - ECD->printQualifiedName(Out, Policy); - return; + if (Policy.UseEnumerators) { + if (const EnumType *ET = T->getAs<EnumType>()) { + for (const EnumConstantDecl *ECD : ET->getDecl()->enumerators()) { + // In Sema::CheckTemplateArugment, enum template arguments value are + // extended to the size of the integer underlying the enum type. This + // may create a size difference between the enum value and template + // argument value, requiring isSameValue here instead of operator==. 
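// Illustrative sketch (not part of the diff above): effect of the new
// PrintingPolicy::UseEnumerators guard in printIntegral on an enum-typed
// template argument. With enumerator printing enabled the value is spelled by
// name; with it disabled it falls through to the integral spelling (roughly a
// cast of the underlying value). 'Color' and 'Widget' are made-up names.
enum class Color { Red = 1, Green = 2 };
template <Color C> struct Widget {};
Widget<Color::Red> w;
// UseEnumerators == true   ->  Widget<Color::Red>
// UseEnumerators == false  ->  roughly Widget<(Color)1>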
+ if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) { + ECD->printQualifiedName(Out, Policy); + return; + } } } } @@ -87,19 +89,20 @@ static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out, else if (T->isSpecificBuiltinType(BuiltinType::UChar)) Out << "(unsigned char)"; } - CharacterLiteral::print(Val.getZExtValue(), CharacterLiteral::Ascii, Out); + CharacterLiteral::print(Val.getZExtValue(), CharacterLiteralKind::Ascii, + Out); } else if (T->isAnyCharacterType() && !Policy.MSVCFormatting) { - CharacterLiteral::CharacterKind Kind; + CharacterLiteralKind Kind; if (T->isWideCharType()) - Kind = CharacterLiteral::Wide; + Kind = CharacterLiteralKind::Wide; else if (T->isChar8Type()) - Kind = CharacterLiteral::UTF8; + Kind = CharacterLiteralKind::UTF8; else if (T->isChar16Type()) - Kind = CharacterLiteral::UTF16; + Kind = CharacterLiteralKind::UTF16; else if (T->isChar32Type()) - Kind = CharacterLiteral::UTF32; + Kind = CharacterLiteralKind::UTF32; else - Kind = CharacterLiteral::Ascii; + Kind = CharacterLiteralKind::Ascii; CharacterLiteral::print(Val.getExtValue(), Kind, Out); } else if (IncludeType) { if (const auto *BT = T->getAs<BuiltinType>()) { @@ -158,9 +161,27 @@ static bool needsAmpersandOnTemplateArg(QualType paramType, QualType argType) { // TemplateArgument Implementation //===----------------------------------------------------------------------===// -TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value, - QualType Type) { +void TemplateArgument::initFromType(QualType T, bool IsNullPtr, + bool IsDefaulted) { + TypeOrValue.Kind = IsNullPtr ? NullPtr : Type; + TypeOrValue.IsDefaulted = IsDefaulted; + TypeOrValue.V = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr()); +} + +void TemplateArgument::initFromDeclaration(ValueDecl *D, QualType QT, + bool IsDefaulted) { + assert(D && "Expected decl"); + DeclArg.Kind = Declaration; + DeclArg.IsDefaulted = IsDefaulted; + DeclArg.QT = QT.getAsOpaquePtr(); + DeclArg.D = D; +} + +void TemplateArgument::initFromIntegral(const ASTContext &Ctx, + const llvm::APSInt &Value, + QualType Type, bool IsDefaulted) { Integer.Kind = Integral; + Integer.IsDefaulted = IsDefaulted; // Copy the APSInt value into our decomposed form. Integer.BitWidth = Value.getBitWidth(); Integer.IsUnsigned = Value.isUnsigned(); @@ -177,6 +198,61 @@ TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value, Integer.Type = Type.getAsOpaquePtr(); } +void TemplateArgument::initFromStructural(const ASTContext &Ctx, QualType Type, + const APValue &V, bool IsDefaulted) { + Value.Kind = StructuralValue; + Value.IsDefaulted = IsDefaulted; + Value.Value = new (Ctx) APValue(V); + Ctx.addDestruction(Value.Value); + Value.Type = Type.getAsOpaquePtr(); +} + +TemplateArgument::TemplateArgument(const ASTContext &Ctx, + const llvm::APSInt &Value, QualType Type, + bool IsDefaulted) { + initFromIntegral(Ctx, Value, Type, IsDefaulted); +} + +static const ValueDecl *getAsSimpleValueDeclRef(const ASTContext &Ctx, + QualType T, const APValue &V) { + // Pointers to members are relatively easy. + if (V.isMemberPointer() && V.getMemberPointerPath().empty()) + return V.getMemberPointerDecl(); + + // We model class non-type template parameters as their template parameter + // object declaration. + if (V.isStruct() || V.isUnion()) { + // Dependent types are not supposed to be described as + // TemplateParamObjectDecls. 
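// Illustrative sketch (not part of the diff above): the CharacterLiteralKind
// cases select how character-typed template arguments are printed back, so an
// argument round-trips as a character literal rather than a bare integer.
// 'K' and 'KW' are made-up example templates.
template <char C> struct K {};
template <wchar_t W> struct KW {};
K<'A'> k;    // Ascii kind: printed as K<'A'> rather than K<65>
KW<L'Z'> kw; // Wide kind: printed with the L prefix, KW<L'Z'>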
+ if (T->isDependentType() || T->isInstantiationDependentType()) + return nullptr; + return Ctx.getTemplateParamObjectDecl(T, V); + } + + // Pointers and references with an empty path use the special 'Declaration' + // representation. + if (V.isLValue() && V.hasLValuePath() && V.getLValuePath().empty() && + !V.isLValueOnePastTheEnd()) + return V.getLValueBase().dyn_cast<const ValueDecl *>(); + + // Everything else uses the 'structural' representation. + return nullptr; +} + +TemplateArgument::TemplateArgument(const ASTContext &Ctx, QualType Type, + const APValue &V, bool IsDefaulted) { + if (Type->isIntegralOrEnumerationType() && V.isInt()) + initFromIntegral(Ctx, V.getInt(), Type, IsDefaulted); + else if ((V.isLValue() && V.isNullPointer()) || + (V.isMemberPointer() && !V.getMemberPointerDecl())) + initFromType(Type, /*isNullPtr=*/true, IsDefaulted); + else if (const ValueDecl *VD = getAsSimpleValueDeclRef(Ctx, Type, V)) + // FIXME: The Declaration form should expose a const ValueDecl*. + initFromDeclaration(const_cast<ValueDecl *>(VD), Type, IsDefaulted); + else + initFromStructural(Ctx, Type, V, IsDefaulted); +} + TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context, ArrayRef<TemplateArgument> Args) { @@ -217,6 +293,7 @@ TemplateArgumentDependence TemplateArgument::getDependence() const { case NullPtr: case Integral: + case StructuralValue: return TemplateArgumentDependence::None; case Expression: @@ -247,6 +324,7 @@ bool TemplateArgument::isPackExpansion() const { case Null: case Declaration: case Integral: + case StructuralValue: case Pack: case Template: case NullPtr: @@ -269,12 +347,12 @@ bool TemplateArgument::containsUnexpandedParameterPack() const { return getDependence() & TemplateArgumentDependence::UnexpandedPack; } -Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const { +std::optional<unsigned> TemplateArgument::getNumTemplateExpansions() const { assert(getKind() == TemplateExpansion); if (TemplateArg.NumExpansions) return TemplateArg.NumExpansions - 1; - return None; + return std::nullopt; } QualType TemplateArgument::getNonTypeTemplateArgumentType() const { @@ -297,6 +375,9 @@ QualType TemplateArgument::getNonTypeTemplateArgumentType() const { case TemplateArgument::NullPtr: return getNullPtrType(); + + case TemplateArgument::StructuralValue: + return getStructuralValueType(); } llvm_unreachable("Invalid TemplateArgument Kind!"); @@ -319,30 +400,24 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID, case Declaration: getParamTypeForDecl().Profile(ID); - ID.AddPointer(getAsDecl()? 
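// Illustrative sketch (not part of the diff above): how the APValue-based
// constructor sorts C++20 non-type template arguments into representations.
// A class-type argument is modeled through a TemplateParamObjectDecl (the
// Declaration form), while a floating-point argument has no natural
// declaration and takes the new StructuralValue form. 'Pair', 'sum' and
// 'half' are made-up names.
struct Pair { int x, y; };
template <Pair P> constexpr int sum() { return P.x + P.y; }   // Declaration form (template param object)
static_assert(sum<Pair{1, 2}>() == 3);

template <double D> constexpr double half() { return D / 2; } // StructuralValue form
static_assert(half<3.0>() == 1.5);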
getAsDecl()->getCanonicalDecl() : nullptr); + ID.AddPointer(getAsDecl()); break; + case TemplateExpansion: + ID.AddInteger(TemplateArg.NumExpansions); + [[fallthrough]]; case Template: - case TemplateExpansion: { - TemplateName Template = getAsTemplateOrTemplatePattern(); - if (TemplateTemplateParmDecl *TTP - = dyn_cast_or_null<TemplateTemplateParmDecl>( - Template.getAsTemplateDecl())) { - ID.AddBoolean(true); - ID.AddInteger(TTP->getDepth()); - ID.AddInteger(TTP->getPosition()); - ID.AddBoolean(TTP->isParameterPack()); - } else { - ID.AddBoolean(false); - ID.AddPointer(Context.getCanonicalTemplateName(Template) - .getAsVoidPointer()); - } + ID.AddPointer(TemplateArg.Name); break; - } case Integral: - getAsIntegral().Profile(ID); getIntegralType().Profile(ID); + getAsIntegral().Profile(ID); + break; + + case StructuralValue: + getStructuralValueType().Profile(ID); + getAsStructuralValue().Profile(ID); break; case Expression: @@ -372,12 +447,24 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const { TemplateArg.NumExpansions == Other.TemplateArg.NumExpansions; case Declaration: - return getAsDecl() == Other.getAsDecl(); + return getAsDecl() == Other.getAsDecl() && + getParamTypeForDecl() == Other.getParamTypeForDecl(); case Integral: return getIntegralType() == Other.getIntegralType() && getAsIntegral() == Other.getAsIntegral(); + case StructuralValue: { + if (getStructuralValueType().getCanonicalType() != + Other.getStructuralValueType().getCanonicalType()) + return false; + + llvm::FoldingSetNodeID A, B; + getAsStructuralValue().Profile(A); + Other.getAsStructuralValue().Profile(B); + return A == B; + } + case Pack: if (Args.NumArgs != Other.Args.NumArgs) return false; for (unsigned I = 0, E = Args.NumArgs; I != E; ++I) @@ -404,6 +491,7 @@ TemplateArgument TemplateArgument::getPackExpansionPattern() const { case Declaration: case Integral: + case StructuralValue: case Pack: case Null: case Template: @@ -430,11 +518,11 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out, } case Declaration: { - // FIXME: Include the type if it's not obvious from the context. NamedDecl *ND = getAsDecl(); if (getParamTypeForDecl()->isRecordType()) { if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) { - TPO->printAsInit(Out); + TPO->getType().getUnqualifiedType().print(Out, Policy); + TPO->printAsInit(Out, Policy); break; } } @@ -446,14 +534,19 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out, break; } + case StructuralValue: + getAsStructuralValue().printPretty(Out, Policy, getStructuralValueType()); + break; + case NullPtr: // FIXME: Include the type if it's not obvious from the context. Out << "nullptr"; break; - case Template: + case Template: { getAsTemplate().print(Out, Policy); break; + } case TemplateExpansion: getAsTemplateOrTemplatePattern().print(Out, Policy); @@ -484,15 +577,6 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out, } } -void TemplateArgument::dump(raw_ostream &Out) const { - LangOptions LO; // FIXME! see also TemplateName::dump(). 
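// Illustrative sketch (not part of the diff above): why the StructuralValue
// case of structurallyEquals compares canonical types plus value profiles.
// Differently spelled arguments with the same value must name the same
// specialization. 'X' is a made-up example template.
#include <type_traits>
template <double D> struct X {};
static_assert(std::is_same_v<X<1.5>, X<3.0 / 2.0>>);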
- LO.CPlusPlus = true; - LO.Bool = true; - print(PrintingPolicy(LO), Out, /*IncludeType*/ true); -} - -LLVM_DUMP_METHOD void TemplateArgument::dump() const { dump(llvm::errs()); } - //===----------------------------------------------------------------------===// // TemplateArgumentLoc Implementation //===----------------------------------------------------------------------===// @@ -529,6 +613,9 @@ SourceRange TemplateArgumentLoc::getSourceRange() const { case TemplateArgument::Integral: return getSourceIntegralExpression()->getSourceRange(); + case TemplateArgument::StructuralValue: + return getSourceStructuralValueExpression()->getSourceRange(); + case TemplateArgument::Pack: case TemplateArgument::Null: return SourceRange(); @@ -557,6 +644,18 @@ static const T &DiagTemplateArg(const T &DB, const TemplateArgument &Arg) { case TemplateArgument::Integral: return DB << toString(Arg.getAsIntegral(), 10); + case TemplateArgument::StructuralValue: { + // FIXME: We're guessing at LangOptions! + SmallString<32> Str; + llvm::raw_svector_ostream OS(Str); + LangOptions LangOpts; + LangOpts.CPlusPlus = true; + PrintingPolicy Policy(LangOpts); + Arg.getAsStructuralValue().printPretty(OS, Policy, + Arg.getStructuralValueType()); + return DB << OS.str(); + } + case TemplateArgument::Template: return DB << Arg.getAsTemplate(); @@ -615,6 +714,17 @@ ASTTemplateArgumentListInfo::Create(const ASTContext &C, return new (Mem) ASTTemplateArgumentListInfo(List); } +const ASTTemplateArgumentListInfo * +ASTTemplateArgumentListInfo::Create(const ASTContext &C, + const ASTTemplateArgumentListInfo *List) { + if (!List) + return nullptr; + std::size_t size = + totalSizeToAlloc<TemplateArgumentLoc>(List->getNumTemplateArgs()); + void *Mem = C.Allocate(size, alignof(ASTTemplateArgumentListInfo)); + return new (Mem) ASTTemplateArgumentListInfo(List); +} + ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo( const TemplateArgumentListInfo &Info) { LAngleLoc = Info.getLAngleLoc(); @@ -626,6 +736,17 @@ ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo( new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]); } +ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo( + const ASTTemplateArgumentListInfo *Info) { + LAngleLoc = Info->getLAngleLoc(); + RAngleLoc = Info->getRAngleLoc(); + NumTemplateArgs = Info->getNumTemplateArgs(); + + TemplateArgumentLoc *ArgBuffer = getTrailingObjects<TemplateArgumentLoc>(); + for (unsigned i = 0; i != NumTemplateArgs; ++i) + new (&ArgBuffer[i]) TemplateArgumentLoc((*Info)[i]); +} + void ASTTemplateKWAndArgsInfo::initializeFrom( SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info, TemplateArgumentLoc *OutArgArray) { diff --git a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp index 22cfa9acbe1b..11544dbb56e3 100644 --- a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp +++ b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp @@ -13,6 +13,7 @@ #include "clang/AST/TemplateName.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclBase.h" +#include "clang/AST/DeclCXX.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DependenceFlags.h" #include "clang/AST/NestedNameSpecifier.h" @@ -28,37 +29,74 @@ #include "llvm/Support/Compiler.h" #include "llvm/Support/raw_ostream.h" #include <cassert> +#include <optional> #include <string> using namespace clang; TemplateArgument SubstTemplateTemplateParmPackStorage::getArgumentPack() const { - return TemplateArgument(llvm::makeArrayRef(Arguments, size())); + return 
TemplateArgument(llvm::ArrayRef(Arguments, Bits.Data)); +} + +TemplateTemplateParmDecl * +SubstTemplateTemplateParmPackStorage::getParameterPack() const { + return cast<TemplateTemplateParmDecl>( + getReplacedTemplateParameterList(getAssociatedDecl()) + ->asArray()[Bits.Index]); +} + +TemplateTemplateParmDecl * +SubstTemplateTemplateParmStorage::getParameter() const { + return cast<TemplateTemplateParmDecl>( + getReplacedTemplateParameterList(getAssociatedDecl()) + ->asArray()[Bits.Index]); } void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) { - Profile(ID, Parameter, Replacement); + Profile(ID, Replacement, getAssociatedDecl(), getIndex(), getPackIndex()); +} + +void SubstTemplateTemplateParmStorage::Profile( + llvm::FoldingSetNodeID &ID, TemplateName Replacement, Decl *AssociatedDecl, + unsigned Index, std::optional<unsigned> PackIndex) { + Replacement.Profile(ID); + ID.AddPointer(AssociatedDecl); + ID.AddInteger(Index); + ID.AddInteger(PackIndex ? *PackIndex + 1 : 0); } -void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID, - TemplateTemplateParmDecl *parameter, - TemplateName replacement) { - ID.AddPointer(parameter); - ID.AddPointer(replacement.getAsVoidPointer()); +SubstTemplateTemplateParmPackStorage::SubstTemplateTemplateParmPackStorage( + ArrayRef<TemplateArgument> ArgPack, Decl *AssociatedDecl, unsigned Index, + bool Final) + : UncommonTemplateNameStorage(SubstTemplateTemplateParmPack, Index, + ArgPack.size()), + Arguments(ArgPack.data()), AssociatedDeclAndFinal(AssociatedDecl, Final) { + assert(AssociatedDecl != nullptr); } void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context) { - Profile(ID, Context, Parameter, getArgumentPack()); + Profile(ID, Context, getArgumentPack(), getAssociatedDecl(), getIndex(), + getFinal()); } -void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID, - ASTContext &Context, - TemplateTemplateParmDecl *Parameter, - const TemplateArgument &ArgPack) { - ID.AddPointer(Parameter); +Decl *SubstTemplateTemplateParmPackStorage::getAssociatedDecl() const { + return AssociatedDeclAndFinal.getPointer(); +} + +bool SubstTemplateTemplateParmPackStorage::getFinal() const { + return AssociatedDeclAndFinal.getInt(); +} + +void SubstTemplateTemplateParmPackStorage::Profile( + llvm::FoldingSetNodeID &ID, ASTContext &Context, + const TemplateArgument &ArgPack, Decl *AssociatedDecl, unsigned Index, + bool Final) { ArgPack.Profile(ID, Context); + ID.AddPointer(AssociatedDecl); + ID.AddInteger(Index); + ID.AddBoolean(Final); } TemplateName::TemplateName(void *Ptr) { @@ -76,12 +114,18 @@ TemplateName::TemplateName(SubstTemplateTemplateParmPackStorage *Storage) : Storage(Storage) {} TemplateName::TemplateName(QualifiedTemplateName *Qual) : Storage(Qual) {} TemplateName::TemplateName(DependentTemplateName *Dep) : Storage(Dep) {} +TemplateName::TemplateName(UsingShadowDecl *Using) : Storage(Using) {} bool TemplateName::isNull() const { return Storage.isNull(); } TemplateName::NameKind TemplateName::getKind() const { - if (Storage.is<TemplateDecl *>()) + if (auto *ND = Storage.dyn_cast<Decl *>()) { + if (isa<UsingShadowDecl>(ND)) + return UsingTemplate; + assert(isa<TemplateDecl>(ND)); return Template; + } + if (Storage.is<DependentTemplateName *>()) return DependentTemplate; if (Storage.is<QualifiedTemplateName *>()) @@ -99,15 +143,23 @@ TemplateName::NameKind TemplateName::getKind() const { } TemplateDecl *TemplateName::getAsTemplateDecl() const { - if 
(TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>()) - return Template; + if (Decl *TemplateOrUsing = Storage.dyn_cast<Decl *>()) { + if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(TemplateOrUsing)) + return cast<TemplateDecl>(USD->getTargetDecl()); + + assert(isa<TemplateDecl>(TemplateOrUsing)); + return cast<TemplateDecl>(TemplateOrUsing); + } if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) - return QTN->getTemplateDecl(); + return QTN->getUnderlyingTemplate().getAsTemplateDecl(); if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm()) return sub->getReplacement().getAsTemplateDecl(); + if (UsingShadowDecl *USD = getAsUsingShadowDecl()) + return cast<TemplateDecl>(USD->getTargetDecl()); + return nullptr; } @@ -153,29 +205,21 @@ DependentTemplateName *TemplateName::getAsDependentTemplateName() const { return Storage.dyn_cast<DependentTemplateName *>(); } -TemplateName TemplateName::getNameToSubstitute() const { - TemplateDecl *Decl = getAsTemplateDecl(); - - // Substituting a dependent template name: preserve it as written. - if (!Decl) - return *this; - - // If we have a template declaration, use the most recent non-friend - // declaration of that template. - Decl = cast<TemplateDecl>(Decl->getMostRecentDecl()); - while (Decl->getFriendObjectKind()) { - Decl = cast<TemplateDecl>(Decl->getPreviousDecl()); - assert(Decl && "all declarations of template are friends"); - } - return TemplateName(Decl); +UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const { + if (Decl *D = Storage.dyn_cast<Decl *>()) + if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D)) + return USD; + if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) + return QTN->getUnderlyingTemplate().getAsUsingShadowDecl(); + return nullptr; } TemplateNameDependence TemplateName::getDependence() const { auto D = TemplateNameDependence::None; switch (getKind()) { case TemplateName::NameKind::QualifiedTemplate: - D |= toTemplateNameDependence( - getAsQualifiedTemplateName()->getQualifier()->getDependence()); + if (NestedNameSpecifier *NNS = getAsQualifiedTemplateName()->getQualifier()) + D |= toTemplateNameDependence(NNS->getDependence()); break; case TemplateName::NameKind::DependentTemplate: D |= toTemplateNameDependence( @@ -220,37 +264,86 @@ bool TemplateName::containsUnexpandedParameterPack() const { return getDependence() & TemplateNameDependence::UnexpandedPack; } -void -TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy, - bool SuppressNNS) const { - if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>()) - OS << *Template; - else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) { - if (!SuppressNNS) - QTN->getQualifier()->print(OS, Policy); +void TemplateName::Profile(llvm::FoldingSetNodeID &ID) { + if (const auto* USD = getAsUsingShadowDecl()) + ID.AddPointer(USD->getCanonicalDecl()); + else if (const auto *TD = getAsTemplateDecl()) + ID.AddPointer(TD->getCanonicalDecl()); + else + ID.AddPointer(Storage.getOpaqueValue()); +} + +void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy, + Qualified Qual) const { + auto handleAnonymousTTP = [](TemplateDecl *TD, raw_ostream &OS) { + if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(TD); + TTP && TTP->getIdentifier() == nullptr) { + OS << "template-parameter-" << TTP->getDepth() << "-" << TTP->getIndex(); + return true; + } + return false; + }; + if (NameKind Kind = getKind(); + Kind == TemplateName::Template || Kind == 
TemplateName::UsingTemplate) { + // After `namespace ns { using std::vector }`, what is the fully-qualified + // name of the UsingTemplateName `vector` within ns? + // + // - ns::vector (the qualified name of the using-shadow decl) + // - std::vector (the qualified name of the underlying template decl) + // + // Similar to the UsingType behavior, using declarations are used to import + // names more often than to export them, thus using the original name is + // most useful in this case. + TemplateDecl *Template = getAsTemplateDecl(); + if (handleAnonymousTTP(Template, OS)) + return; + if (Qual == Qualified::None) + OS << *Template; + else + Template->printQualifiedName(OS, Policy); + } else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) { + if (NestedNameSpecifier *NNS = QTN->getQualifier(); + Qual != Qualified::None && NNS) + NNS->print(OS, Policy); if (QTN->hasTemplateKeyword()) OS << "template "; - OS << *QTN->getDecl(); + + TemplateName Underlying = QTN->getUnderlyingTemplate(); + assert(Underlying.getKind() == TemplateName::Template || + Underlying.getKind() == TemplateName::UsingTemplate); + + TemplateDecl *UTD = Underlying.getAsTemplateDecl(); + + if (handleAnonymousTTP(UTD, OS)) + return; + + if (IdentifierInfo *II = UTD->getIdentifier(); + Policy.CleanUglifiedParameters && II && + isa<TemplateTemplateParmDecl>(UTD)) + OS << II->deuglifiedName(); + else + OS << *UTD; } else if (DependentTemplateName *DTN = getAsDependentTemplateName()) { - if (!SuppressNNS && DTN->getQualifier()) - DTN->getQualifier()->print(OS, Policy); + if (NestedNameSpecifier *NNS = DTN->getQualifier()) + NNS->print(OS, Policy); OS << "template "; if (DTN->isIdentifier()) OS << DTN->getIdentifier()->getName(); else OS << "operator " << getOperatorSpelling(DTN->getOperator()); - } else if (SubstTemplateTemplateParmStorage *subst - = getAsSubstTemplateTemplateParm()) { - subst->getReplacement().print(OS, Policy, SuppressNNS); - } else if (SubstTemplateTemplateParmPackStorage *SubstPack - = getAsSubstTemplateTemplateParmPack()) + } else if (SubstTemplateTemplateParmStorage *subst = + getAsSubstTemplateTemplateParm()) { + subst->getReplacement().print(OS, Policy, Qual); + } else if (SubstTemplateTemplateParmPackStorage *SubstPack = + getAsSubstTemplateTemplateParmPack()) OS << *SubstPack->getParameterPack(); else if (AssumedTemplateStorage *Assumed = getAsAssumedTemplateName()) { Assumed->getDeclName().print(OS, Policy); } else { + assert(getKind() == TemplateName::OverloadedTemplate); OverloadedTemplateStorage *OTS = getAsOverloadedTemplate(); - (*OTS->begin())->printName(OS); + (*OTS->begin())->printName(OS, Policy); } } @@ -267,14 +360,3 @@ const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB, OS.flush(); return DB << NameStr; } - -void TemplateName::dump(raw_ostream &OS) const { - LangOptions LO; // FIXME! 
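// Illustrative sketch (not part of the diff above): the source pattern behind
// the UsingTemplate printing logic. The template is found through a
// using-declaration, and qualified printing deliberately spells the underlying
// template's name. The namespaces below are made up.
namespace impl {
template <class T> struct vector {};
} // namespace impl
namespace api {
using impl::vector; // UsingShadowDecl; TemplateName kind UsingTemplate
} // namespace api
api::vector<int> v; // qualified printing names the underlying template, 'impl::vector'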
- LO.CPlusPlus = true; - LO.Bool = true; - print(OS, PrintingPolicy(LO)); -} - -LLVM_DUMP_METHOD void TemplateName::dump() const { - dump(llvm::errs()); -} diff --git a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp index 33f914f9f886..5ba952350425 100644 --- a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp +++ b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp @@ -16,7 +16,9 @@ #include "clang/AST/DeclOpenMP.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/LocInfoType.h" +#include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Type.h" +#include "clang/AST/TypeLocVisitor.h" #include "clang/Basic/Module.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" @@ -239,6 +241,27 @@ void TextNodeDumper::Visit(QualType T) { OS << " " << T.split().Quals.getAsString(); } +void TextNodeDumper::Visit(TypeLoc TL) { + if (!TL) { + ColorScope Color(OS, ShowColors, NullColor); + OS << "<<<NULL>>>"; + return; + } + + { + ColorScope Color(OS, ShowColors, TypeColor); + OS << (TL.getTypeLocClass() == TypeLoc::Qualified + ? "Qualified" + : TL.getType()->getTypeClassName()) + << "TypeLoc"; + } + dumpSourceRange(TL.getSourceRange()); + OS << ' '; + dumpBareType(TL.getType(), /*Desugar=*/false); + + TypeLocVisitor<TextNodeDumper>::Visit(TL); +} + void TextNodeDumper::Visit(const Decl *D) { if (!D) { ColorScope Color(OS, ShowColors, NullColor); @@ -283,6 +306,10 @@ void TextNodeDumper::Visit(const Decl *D) { OS << " constexpr"; if (FD->isConsteval()) OS << " consteval"; + else if (FD->isImmediateFunction()) + OS << " immediate"; + if (FD->isMultiVersion()) + OS << " multiversion"; } if (!isa<FunctionDecl>(*D)) { @@ -296,6 +323,17 @@ void TextNodeDumper::Visit(const Decl *D) { } } + switch (D->getFriendObjectKind()) { + case Decl::FOK_None: + break; + case Decl::FOK_Declared: + OS << " friend"; + break; + case Decl::FOK_Undeclared: + OS << " friend_undeclared"; + break; + } + ConstDeclVisitor<TextNodeDumper>::Visit(D); } @@ -343,6 +381,98 @@ void TextNodeDumper::Visit(const OMPClause *C) { OS << " <implicit>"; } +void TextNodeDumper::Visit(const OpenACCClause *C) { + if (!C) { + ColorScope Color(OS, ShowColors, NullColor); + OS << "<<<NULL>>> OpenACCClause"; + return; + } + { + ColorScope Color(OS, ShowColors, AttrColor); + OS << C->getClauseKind(); + + // Handle clauses with parens for types that have no children, likely + // because there is no sub expression. + switch (C->getClauseKind()) { + case OpenACCClauseKind::Default: + OS << '(' << cast<OpenACCDefaultClause>(C)->getDefaultClauseKind() << ')'; + break; + case OpenACCClauseKind::Async: + case OpenACCClauseKind::Auto: + case OpenACCClauseKind::Attach: + case OpenACCClauseKind::Copy: + case OpenACCClauseKind::PCopy: + case OpenACCClauseKind::PresentOrCopy: + case OpenACCClauseKind::If: + case OpenACCClauseKind::Independent: + case OpenACCClauseKind::DevicePtr: + case OpenACCClauseKind::FirstPrivate: + case OpenACCClauseKind::NoCreate: + case OpenACCClauseKind::NumGangs: + case OpenACCClauseKind::NumWorkers: + case OpenACCClauseKind::Present: + case OpenACCClauseKind::Private: + case OpenACCClauseKind::Self: + case OpenACCClauseKind::Seq: + case OpenACCClauseKind::VectorLength: + // The condition expression will be printed as a part of the 'children', + // but print 'clause' here so it is clear what is happening from the dump. 
+ OS << " clause"; + break; + case OpenACCClauseKind::CopyIn: + case OpenACCClauseKind::PCopyIn: + case OpenACCClauseKind::PresentOrCopyIn: + OS << " clause"; + if (cast<OpenACCCopyInClause>(C)->isReadOnly()) + OS << " : readonly"; + break; + case OpenACCClauseKind::CopyOut: + case OpenACCClauseKind::PCopyOut: + case OpenACCClauseKind::PresentOrCopyOut: + OS << " clause"; + if (cast<OpenACCCopyOutClause>(C)->isZero()) + OS << " : zero"; + break; + case OpenACCClauseKind::Create: + case OpenACCClauseKind::PCreate: + case OpenACCClauseKind::PresentOrCreate: + OS << " clause"; + if (cast<OpenACCCreateClause>(C)->isZero()) + OS << " : zero"; + break; + case OpenACCClauseKind::Wait: + OS << " clause"; + if (cast<OpenACCWaitClause>(C)->hasDevNumExpr()) + OS << " has devnum"; + if (cast<OpenACCWaitClause>(C)->hasQueuesTag()) + OS << " has queues tag"; + break; + case OpenACCClauseKind::DeviceType: + case OpenACCClauseKind::DType: + OS << "("; + llvm::interleaveComma( + cast<OpenACCDeviceTypeClause>(C)->getArchitectures(), OS, + [&](const DeviceTypeArgument &Arch) { + if (Arch.first == nullptr) + OS << "*"; + else + OS << Arch.first->getName(); + }); + OS << ")"; + break; + case OpenACCClauseKind::Reduction: + OS << " clause Operator: " + << cast<OpenACCReductionClause>(C)->getReductionOp(); + break; + default: + // Nothing to do here. + break; + } + } + dumpPointer(C); + dumpSourceRange(SourceRange(C->getBeginLoc(), C->getEndLoc())); +} + void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) { const TypeSourceInfo *TSI = A.getTypeSourceInfo(); if (TSI) { @@ -356,6 +486,20 @@ void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) { OS << " selected"; } +void TextNodeDumper::Visit(const ConceptReference *R) { + if (!R) { + ColorScope Color(OS, ShowColors, NullColor); + OS << "<<<NULL>>> ConceptReference"; + return; + } + + OS << "ConceptReference"; + dumpPointer(R); + dumpSourceRange(R->getSourceRange()); + OS << ' '; + dumpBareDeclRef(R->getNamedConcept()); +} + void TextNodeDumper::Visit(const concepts::Requirement *R) { if (!R) { ColorScope Color(OS, ShowColors, NullColor); @@ -662,13 +806,18 @@ void TextNodeDumper::dumpBareType(QualType T, bool Desugar) { ColorScope Color(OS, ShowColors, TypeColor); SplitQualType T_split = T.split(); - OS << "'" << QualType::getAsString(T_split, PrintPolicy) << "'"; + std::string T_str = QualType::getAsString(T_split, PrintPolicy); + OS << "'" << T_str << "'"; if (Desugar && !T.isNull()) { - // If the type is sugared, also dump a (shallow) desugared type. + // If the type is sugared, also dump a (shallow) desugared type when + // it is visibly different. 
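// Illustrative sketch (not part of the diff above): the ConceptReference node
// that the new dumper hook prints corresponds to a named-concept mention such
// as the constrained parameter below. 'Integral' and 'only_ints' are made-up
// names.
#include <type_traits>
template <class T> concept Integral = std::is_integral_v<T>;
template <Integral T> void only_ints(T); // the 'Integral' mention is a ConceptReference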
SplitQualType D_split = T.getSplitDesugaredType(); - if (T_split != D_split) - OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'"; + if (T_split != D_split) { + std::string D_str = QualType::getAsString(D_split, PrintPolicy); + if (T_str != D_str) + OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'"; + } } } @@ -730,6 +879,66 @@ void TextNodeDumper::dumpCleanupObject( llvm_unreachable("unexpected cleanup type"); } +void clang::TextNodeDumper::dumpTemplateSpecializationKind( + TemplateSpecializationKind TSK) { + switch (TSK) { + case TSK_Undeclared: + break; + case TSK_ImplicitInstantiation: + OS << " implicit_instantiation"; + break; + case TSK_ExplicitSpecialization: + OS << " explicit_specialization"; + break; + case TSK_ExplicitInstantiationDeclaration: + OS << " explicit_instantiation_declaration"; + break; + case TSK_ExplicitInstantiationDefinition: + OS << " explicit_instantiation_definition"; + break; + } +} + +void clang::TextNodeDumper::dumpNestedNameSpecifier(const NestedNameSpecifier *NNS) { + if (!NNS) + return; + + AddChild([=] { + OS << "NestedNameSpecifier"; + + switch (NNS->getKind()) { + case NestedNameSpecifier::Identifier: + OS << " Identifier"; + OS << " '" << NNS->getAsIdentifier()->getName() << "'"; + break; + case NestedNameSpecifier::Namespace: + OS << " "; // "Namespace" is printed as the decl kind. + dumpBareDeclRef(NNS->getAsNamespace()); + break; + case NestedNameSpecifier::NamespaceAlias: + OS << " "; // "NamespaceAlias" is printed as the decl kind. + dumpBareDeclRef(NNS->getAsNamespaceAlias()); + break; + case NestedNameSpecifier::TypeSpec: + OS << " TypeSpec"; + dumpType(QualType(NNS->getAsType(), 0)); + break; + case NestedNameSpecifier::TypeSpecWithTemplate: + OS << " TypeSpecWithTemplate"; + dumpType(QualType(NNS->getAsType(), 0)); + break; + case NestedNameSpecifier::Global: + OS << " Global"; + break; + case NestedNameSpecifier::Super: + OS << " Super"; + break; + } + + dumpNestedNameSpecifier(NNS->getPrefix()); + }); +} + void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) { if (!D) return; @@ -741,6 +950,29 @@ void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) { }); } +void TextNodeDumper::dumpTemplateArgument(const TemplateArgument &TA) { + llvm::SmallString<128> Str; + { + llvm::raw_svector_ostream SS(Str); + TA.print(PrintPolicy, SS, /*IncludeType=*/true); + } + OS << " '" << Str << "'"; + + if (!Context) + return; + + if (TemplateArgument CanonTA = Context->getCanonicalTemplateArgument(TA); + !CanonTA.structurallyEquals(TA)) { + llvm::SmallString<128> CanonStr; + { + llvm::raw_svector_ostream SS(CanonStr); + CanonTA.print(PrintPolicy, SS, /*IncludeType=*/true); + } + if (CanonStr != Str) + OS << ":'" << CanonStr << "'"; + } +} + const char *TextNodeDumper::getCommandName(unsigned CommandID) { if (Traits) return Traits->getCommandInfo(CommandID)->Name; @@ -767,19 +999,19 @@ void TextNodeDumper::visitInlineCommandComment( const comments::InlineCommandComment *C, const comments::FullComment *) { OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""; switch (C->getRenderKind()) { - case comments::InlineCommandComment::RenderNormal: + case comments::InlineCommandRenderKind::Normal: OS << " RenderNormal"; break; - case comments::InlineCommandComment::RenderBold: + case comments::InlineCommandRenderKind::Bold: OS << " RenderBold"; break; - case comments::InlineCommandComment::RenderMonospaced: + case comments::InlineCommandRenderKind::Monospaced: OS << " RenderMonospaced"; break; - case 
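// Illustrative sketch (not part of the diff above): dumpTemplateArgument
// prints the argument as written and, only when visibly different, its
// canonical form, so type sugar stays visible in -ast-dump output. 'Int' and
// 'S' are made-up names; the dumped text is approximate.
using Int = int;
template <class T> struct S {};
S<Int> s; // the argument dumps roughly as: type 'Int':'int'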
comments::InlineCommandComment::RenderEmphasized: + case comments::InlineCommandRenderKind::Emphasized: OS << " RenderEmphasized"; break; - case comments::InlineCommandComment::RenderAnchor: + case comments::InlineCommandRenderKind::Anchor: OS << " RenderAnchor"; break; } @@ -880,40 +1112,128 @@ void TextNodeDumper::VisitNullTemplateArgument(const TemplateArgument &) { void TextNodeDumper::VisitTypeTemplateArgument(const TemplateArgument &TA) { OS << " type"; - dumpType(TA.getAsType()); + dumpTemplateArgument(TA); } void TextNodeDumper::VisitDeclarationTemplateArgument( const TemplateArgument &TA) { OS << " decl"; + dumpTemplateArgument(TA); dumpDeclRef(TA.getAsDecl()); } -void TextNodeDumper::VisitNullPtrTemplateArgument(const TemplateArgument &) { +void TextNodeDumper::VisitNullPtrTemplateArgument(const TemplateArgument &TA) { OS << " nullptr"; + dumpTemplateArgument(TA); } void TextNodeDumper::VisitIntegralTemplateArgument(const TemplateArgument &TA) { - OS << " integral " << TA.getAsIntegral(); + OS << " integral"; + dumpTemplateArgument(TA); +} + +void TextNodeDumper::dumpTemplateName(TemplateName TN, StringRef Label) { + AddChild(Label, [=] { + { + llvm::SmallString<128> Str; + { + llvm::raw_svector_ostream SS(Str); + TN.print(SS, PrintPolicy); + } + OS << "'" << Str << "'"; + + if (Context) { + if (TemplateName CanonTN = Context->getCanonicalTemplateName(TN); + CanonTN != TN) { + llvm::SmallString<128> CanonStr; + { + llvm::raw_svector_ostream SS(CanonStr); + CanonTN.print(SS, PrintPolicy); + } + if (CanonStr != Str) + OS << ":'" << CanonStr << "'"; + } + } + } + dumpBareTemplateName(TN); + }); +} + +void TextNodeDumper::dumpBareTemplateName(TemplateName TN) { + switch (TN.getKind()) { + case TemplateName::Template: + AddChild([=] { Visit(TN.getAsTemplateDecl()); }); + return; + case TemplateName::UsingTemplate: { + const UsingShadowDecl *USD = TN.getAsUsingShadowDecl(); + AddChild([=] { Visit(USD); }); + AddChild("target", [=] { Visit(USD->getTargetDecl()); }); + return; + } + case TemplateName::QualifiedTemplate: { + OS << " qualified"; + const QualifiedTemplateName *QTN = TN.getAsQualifiedTemplateName(); + if (QTN->hasTemplateKeyword()) + OS << " keyword"; + dumpNestedNameSpecifier(QTN->getQualifier()); + dumpBareTemplateName(QTN->getUnderlyingTemplate()); + return; + } + case TemplateName::DependentTemplate: { + OS << " dependent"; + const DependentTemplateName *DTN = TN.getAsDependentTemplateName(); + dumpNestedNameSpecifier(DTN->getQualifier()); + return; + } + case TemplateName::SubstTemplateTemplateParm: { + OS << " subst"; + const SubstTemplateTemplateParmStorage *STS = + TN.getAsSubstTemplateTemplateParm(); + OS << " index " << STS->getIndex(); + if (std::optional<unsigned int> PackIndex = STS->getPackIndex()) + OS << " pack_index " << *PackIndex; + if (const TemplateTemplateParmDecl *P = STS->getParameter()) + AddChild("parameter", [=] { Visit(P); }); + dumpDeclRef(STS->getAssociatedDecl(), "associated"); + dumpTemplateName(STS->getReplacement(), "replacement"); + return; + } + // FIXME: Implement these. 
+ case TemplateName::OverloadedTemplate: + OS << " overloaded"; + return; + case TemplateName::AssumedTemplate: + OS << " assumed"; + return; + case TemplateName::SubstTemplateTemplateParmPack: + OS << " subst_pack"; + return; + } + llvm_unreachable("Unexpected TemplateName Kind"); } void TextNodeDumper::VisitTemplateTemplateArgument(const TemplateArgument &TA) { - OS << " template "; - TA.getAsTemplate().dump(OS); + OS << " template"; + dumpTemplateArgument(TA); + dumpBareTemplateName(TA.getAsTemplate()); } void TextNodeDumper::VisitTemplateExpansionTemplateArgument( const TemplateArgument &TA) { - OS << " template expansion "; - TA.getAsTemplateOrTemplatePattern().dump(OS); + OS << " template expansion"; + dumpTemplateArgument(TA); + dumpBareTemplateName(TA.getAsTemplateOrTemplatePattern()); } -void TextNodeDumper::VisitExpressionTemplateArgument(const TemplateArgument &) { +void TextNodeDumper::VisitExpressionTemplateArgument( + const TemplateArgument &TA) { OS << " expr"; + dumpTemplateArgument(TA); } -void TextNodeDumper::VisitPackTemplateArgument(const TemplateArgument &) { +void TextNodeDumper::VisitPackTemplateArgument(const TemplateArgument &TA) { OS << " pack"; + dumpTemplateArgument(TA); } static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) { @@ -948,6 +1268,14 @@ void TextNodeDumper::VisitIfStmt(const IfStmt *Node) { OS << " has_var"; if (Node->hasElseStorage()) OS << " has_else"; + if (Node->isConstexpr()) + OS << " constexpr"; + if (Node->isConsteval()) { + OS << " "; + if (Node->isNegatedConsteval()) + OS << "!"; + OS << "consteval"; + } } void TextNodeDumper::VisitSwitchStmt(const SwitchStmt *Node) { @@ -978,6 +1306,24 @@ void TextNodeDumper::VisitCaseStmt(const CaseStmt *Node) { OS << " gnu_range"; } +void clang::TextNodeDumper::VisitReturnStmt(const ReturnStmt *Node) { + if (const VarDecl *Cand = Node->getNRVOCandidate()) { + OS << " nrvo_candidate("; + dumpBareDeclRef(Cand); + OS << ")"; + } +} + +void clang::TextNodeDumper::VisitCoawaitExpr(const CoawaitExpr *Node) { + if (Node->isImplicit()) + OS << " implicit"; +} + +void clang::TextNodeDumper::VisitCoreturnStmt(const CoreturnStmt *Node) { + if (Node->isImplicit()) + OS << " implicit"; +} + void TextNodeDumper::VisitConstantExpr(const ConstantExpr *Node) { if (Node->hasAPValueResult()) AddChild("value", @@ -1020,6 +1366,7 @@ void TextNodeDumper::VisitImplicitCastExpr(const ImplicitCastExpr *Node) { void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) { OS << " "; dumpBareDeclRef(Node->getDecl()); + dumpNestedNameSpecifier(Node->getQualifier()); if (Node->getDecl() != Node->getFoundDecl()) { OS << " ("; dumpBareDeclRef(Node->getFoundDecl()); @@ -1031,6 +1378,19 @@ void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) { case NOUR_Constant: OS << " non_odr_use_constant"; break; case NOUR_Discarded: OS << " non_odr_use_discarded"; break; } + if (Node->isCapturedByCopyInLambdaWithExplicitObjectParameter()) + OS << " dependent_capture"; + else if (Node->refersToEnclosingVariableOrCapture()) + OS << " refers_to_enclosing_variable_or_capture"; + + if (Node->isImmediateEscalating()) + OS << " immediate-escalating"; +} + +void clang::TextNodeDumper::VisitDependentScopeDeclRefExpr( + const DependentScopeDeclRefExpr *Node) { + + dumpNestedNameSpecifier(Node->getQualifier()); } void TextNodeDumper::VisitUnresolvedLookupExpr( @@ -1127,6 +1487,7 @@ void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr( void TextNodeDumper::VisitMemberExpr(const MemberExpr *Node) { OS << " " << (Node->isArrow() ? 
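// Illustrative sketch (not part of the diff above): statement forms behind the
// new IfStmt dump flags; the consteval forms require C++23. 'classify' is a
// made-up example function.
constexpr int classify(bool b) {
  if constexpr (sizeof(int) >= 4) { } // dumped with " constexpr"
  if consteval { } else { }           // dumped with " consteval"
  if !consteval { }                   // dumped with " !consteval"
  return b ? 1 : 0;
}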
"->" : ".") << *Node->getMemberDecl(); dumpPointer(Node->getMemberDecl()); + dumpNestedNameSpecifier(Node->getQualifier()); switch (Node->isNonOdrUse()) { case NOUR_None: break; case NOUR_Unevaluated: OS << " non_odr_use_unevaluated"; break; @@ -1177,6 +1538,8 @@ void TextNodeDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) { void TextNodeDumper::VisitCXXThisExpr(const CXXThisExpr *Node) { if (Node->isImplicit()) OS << " implicit"; + if (Node->isCapturedByCopyInLambdaWithExplicitObjectParameter()) + OS << " dependent_capture"; OS << " this"; } @@ -1212,6 +1575,8 @@ void TextNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) { OS << " std::initializer_list"; if (Node->requiresZeroInitialization()) OS << " zeroing"; + if (Node->isImmediateEscalating()) + OS << " immediate-escalating"; } void TextNodeDumper::VisitCXXBindTemporaryExpr( @@ -1257,6 +1622,16 @@ void TextNodeDumper::VisitExpressionTraitExpr(const ExpressionTraitExpr *Node) { OS << " " << getTraitSpelling(Node->getTrait()); } +void TextNodeDumper::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *Node) { + if (Node->hasRewrittenInit()) + OS << " has rewritten init"; +} + +void TextNodeDumper::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *Node) { + if (Node->hasRewrittenInit()) + OS << " has rewritten init"; +} + void TextNodeDumper::VisitMaterializeTemporaryExpr( const MaterializeTemporaryExpr *Node) { if (const ValueDecl *VD = Node->getExtendingDecl()) { @@ -1419,12 +1794,12 @@ void TextNodeDumper::VisitRValueReferenceType(const ReferenceType *T) { void TextNodeDumper::VisitArrayType(const ArrayType *T) { switch (T->getSizeModifier()) { - case ArrayType::Normal: + case ArraySizeModifier::Normal: break; - case ArrayType::Static: + case ArraySizeModifier::Static: OS << " static"; break; - case ArrayType::Star: + case ArraySizeModifier::Star: OS << " *"; break; } @@ -1457,29 +1832,35 @@ void TextNodeDumper::VisitDependentSizedExtVectorType( void TextNodeDumper::VisitVectorType(const VectorType *T) { switch (T->getVectorKind()) { - case VectorType::GenericVector: + case VectorKind::Generic: break; - case VectorType::AltiVecVector: + case VectorKind::AltiVecVector: OS << " altivec"; break; - case VectorType::AltiVecPixel: + case VectorKind::AltiVecPixel: OS << " altivec pixel"; break; - case VectorType::AltiVecBool: + case VectorKind::AltiVecBool: OS << " altivec bool"; break; - case VectorType::NeonVector: + case VectorKind::Neon: OS << " neon"; break; - case VectorType::NeonPolyVector: + case VectorKind::NeonPoly: OS << " neon poly"; break; - case VectorType::SveFixedLengthDataVector: + case VectorKind::SveFixedLengthData: OS << " fixed-length sve data vector"; break; - case VectorType::SveFixedLengthPredicateVector: + case VectorKind::SveFixedLengthPredicate: OS << " fixed-length sve predicate vector"; break; + case VectorKind::RVVFixedLengthData: + OS << " fixed-length rvv data vector"; + break; + case VectorKind::RVVFixedLengthMask: + OS << " fixed-length rvv mask vector"; + break; } OS << " " << T->getNumElements(); } @@ -1517,7 +1898,64 @@ void TextNodeDumper::VisitFunctionProtoType(const FunctionProtoType *T) { OS << " &&"; break; } - // FIXME: Exception specification. 
+ + switch (EPI.ExceptionSpec.Type) { + case EST_None: + break; + case EST_DynamicNone: + OS << " exceptionspec_dynamic_none"; + break; + case EST_Dynamic: + OS << " exceptionspec_dynamic"; + break; + case EST_MSAny: + OS << " exceptionspec_ms_any"; + break; + case EST_NoThrow: + OS << " exceptionspec_nothrow"; + break; + case EST_BasicNoexcept: + OS << " exceptionspec_basic_noexcept"; + break; + case EST_DependentNoexcept: + OS << " exceptionspec_dependent_noexcept"; + break; + case EST_NoexceptFalse: + OS << " exceptionspec_noexcept_false"; + break; + case EST_NoexceptTrue: + OS << " exceptionspec_noexcept_true"; + break; + case EST_Unevaluated: + OS << " exceptionspec_unevaluated"; + break; + case EST_Uninstantiated: + OS << " exceptionspec_uninstantiated"; + break; + case EST_Unparsed: + OS << " exceptionspec_unparsed"; + break; + } + if (!EPI.ExceptionSpec.Exceptions.empty()) { + AddChild([=] { + OS << "Exceptions:"; + for (unsigned I = 0, N = EPI.ExceptionSpec.Exceptions.size(); I != N; + ++I) { + if (I) + OS << ","; + dumpType(EPI.ExceptionSpec.Exceptions[I]); + } + }); + } + if (EPI.ExceptionSpec.NoexceptExpr) { + AddChild([=] { + OS << "NoexceptExpr: "; + Visit(EPI.ExceptionSpec.NoexceptExpr); + }); + } + dumpDeclRef(EPI.ExceptionSpec.SourceDecl, "ExceptionSourceDecl"); + dumpDeclRef(EPI.ExceptionSpec.SourceTemplate, "ExceptionSourceTemplate"); + // FIXME: Consumed parameters. VisitFunctionType(T); } @@ -1526,15 +1964,25 @@ void TextNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *T) { dumpDeclRef(T->getDecl()); } +void TextNodeDumper::VisitUsingType(const UsingType *T) { + dumpDeclRef(T->getFoundDecl()); + if (!T->typeMatchesDecl()) + OS << " divergent"; +} + void TextNodeDumper::VisitTypedefType(const TypedefType *T) { dumpDeclRef(T->getDecl()); + if (!T->typeMatchesDecl()) + OS << " divergent"; } void TextNodeDumper::VisitUnaryTransformType(const UnaryTransformType *T) { switch (T->getUTTKind()) { - case UnaryTransformType::EnumUnderlyingType: - OS << " underlying_type"; +#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \ + case UnaryTransformType::Enum: \ + OS << " " #Trait; \ break; +#include "clang/Basic/TransformTypeTraits.def" } } @@ -1549,24 +1997,39 @@ void TextNodeDumper::VisitTemplateTypeParmType(const TemplateTypeParmType *T) { dumpDeclRef(T->getDecl()); } +void TextNodeDumper::VisitSubstTemplateTypeParmType( + const SubstTemplateTypeParmType *T) { + dumpDeclRef(T->getAssociatedDecl()); + VisitTemplateTypeParmDecl(T->getReplacedParameter()); + if (auto PackIndex = T->getPackIndex()) + OS << " pack_index " << *PackIndex; +} + +void TextNodeDumper::VisitSubstTemplateTypeParmPackType( + const SubstTemplateTypeParmPackType *T) { + dumpDeclRef(T->getAssociatedDecl()); + VisitTemplateTypeParmDecl(T->getReplacedParameter()); +} + void TextNodeDumper::VisitAutoType(const AutoType *T) { if (T->isDecltypeAuto()) OS << " decltype(auto)"; if (!T->isDeduced()) OS << " undeduced"; - if (T->isConstrained()) { + if (T->isConstrained()) dumpDeclRef(T->getTypeConstraintConcept()); - for (const auto &Arg : T->getTypeConstraintArguments()) - VisitTemplateArgument(Arg); - } +} + +void TextNodeDumper::VisitDeducedTemplateSpecializationType( + const DeducedTemplateSpecializationType *T) { + dumpTemplateName(T->getTemplateName(), "name"); } void TextNodeDumper::VisitTemplateSpecializationType( const TemplateSpecializationType *T) { if (T->isTypeAlias()) OS << " alias"; - OS << " "; - T->getTemplateName().dump(OS); + dumpTemplateName(T->getTemplateName(), "name"); } void 
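// Illustrative sketch (not part of the diff above): declarations matching a
// few of the exception-specification kinds the dumper now spells out. The
// function names are made up; the EST_ mappings follow the switch above.
void a() noexcept;                // EST_BasicNoexcept   -> exceptionspec_basic_noexcept
void b() noexcept(true);          // EST_NoexceptTrue    -> exceptionspec_noexcept_true
void c() noexcept(false);         // EST_NoexceptFalse   -> exceptionspec_noexcept_false
template <class T>
void e() noexcept(noexcept(T())); // EST_DependentNoexcept before instantiation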
TextNodeDumper::VisitInjectedClassNameType( @@ -1583,6 +2046,13 @@ void TextNodeDumper::VisitPackExpansionType(const PackExpansionType *T) { OS << " expansions " << *N; } +void TextNodeDumper::VisitTypeLoc(TypeLoc TL) { + // By default, add extra Type details with no extra loc info. + TypeVisitor<TextNodeDumper>::Visit(TL.getTypePtr()); +} +// FIXME: override behavior for TypeLocs that have interesting location +// information, such as the qualifier in ElaboratedTypeLoc. + void TextNodeDumper::VisitLabelDecl(const LabelDecl *D) { dumpName(D); } void TextNodeDumper::VisitTypedefDecl(const TypedefDecl *D) { @@ -1631,6 +2101,7 @@ void TextNodeDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) { void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) { dumpName(D); dumpType(D->getType()); + dumpTemplateSpecializationKind(D->getTemplateSpecializationKind()); StorageClass SC = D->getStorageClass(); if (SC != SC_None) @@ -1642,7 +2113,7 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) { if (D->isModulePrivate()) OS << " __module_private__"; - if (D->isPure()) + if (D->isPureVirtual()) OS << " pure"; if (D->isDefaulted()) { OS << " default"; @@ -1654,6 +2125,12 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) { if (D->isTrivial()) OS << " trivial"; + if (const StringLiteral *M = D->getDeletedMessage()) + AddChild("delete message", [=] { Visit(M); }); + + if (D->isIneligibleOrNotSelected()) + OS << (isa<CXXDestructorDecl>(D) ? " not_selected" : " ineligible"); + if (const auto *FPT = D->getType()->getAs<FunctionProtoType>()) { FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); switch (EPI.ExceptionSpec.Type) { @@ -1680,8 +2157,7 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) { auto Overrides = MD->overridden_methods(); OS << "Overrides: [ "; dumpOverride(*Overrides.begin()); - for (const auto *Override : - llvm::make_range(Overrides.begin() + 1, Overrides.end())) { + for (const auto *Override : llvm::drop_begin(Overrides)) { OS << ", "; dumpOverride(Override); } @@ -1690,12 +2166,33 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) { } } + if (!D->isInlineSpecified() && D->isInlined()) { + OS << " implicit-inline"; + } // Since NumParams comes from the FunctionProtoType of the FunctionDecl and // the Params are set later, it is possible for a dump during debugging to // encounter a FunctionDecl that has been created but hasn't been assigned // ParmVarDecls yet. 
if (!D->param_empty() && !D->param_begin()) OS << " <<<NULL params x " << D->getNumParams() << ">>>"; + + if (const auto *Instance = D->getInstantiatedFromMemberFunction()) { + OS << " instantiated_from"; + dumpPointer(Instance); + } +} + +void TextNodeDumper::VisitCXXDeductionGuideDecl( + const CXXDeductionGuideDecl *D) { + VisitFunctionDecl(D); + switch (D->getDeductionCandidateKind()) { + case DeductionCandidate::Normal: + case DeductionCandidate::Copy: + return; + case DeductionCandidate::Aggregate: + OS << " aggregate "; + break; + } } void TextNodeDumper::VisitLifetimeExtendedTemporaryDecl( @@ -1719,8 +2216,14 @@ void TextNodeDumper::VisitFieldDecl(const FieldDecl *D) { } void TextNodeDumper::VisitVarDecl(const VarDecl *D) { + dumpNestedNameSpecifier(D->getQualifier()); dumpName(D); + if (const auto *P = dyn_cast<ParmVarDecl>(D); + P && P->isExplicitObjectParameter()) + OS << " this"; + dumpType(D->getType()); + dumpTemplateSpecializationKind(D->getTemplateSpecializationKind()); StorageClass SC = D->getStorageClass(); if (SC != SC_None) OS << ' ' << VarDecl::getStorageClassSpecifierString(SC); @@ -1753,6 +2256,8 @@ void TextNodeDumper::VisitVarDecl(const VarDecl *D) { case VarDecl::ListInit: OS << " listinit"; break; + case VarDecl::ParenListInit: + OS << " parenlistinit"; } } if (D->needsDestruction(D->getASTContext())) @@ -1763,7 +2268,8 @@ void TextNodeDumper::VisitVarDecl(const VarDecl *D) { if (D->hasInit()) { const Expr *E = D->getInit(); // Only dump the value of constexpr VarDecls for now. - if (E && !E->isValueDependent() && D->isConstexpr()) { + if (E && !E->isValueDependent() && D->isConstexpr() && + !D->getType()->isDependentType()) { const APValue *Value = D->evaluateValue(); if (Value) AddChild("value", [=] { Visit(*Value, E->getType()); }); @@ -1836,13 +2342,13 @@ void TextNodeDumper::VisitOMPDeclareReductionDecl( OS << " initializer"; dumpPointer(Initializer); switch (D->getInitializerKind()) { - case OMPDeclareReductionDecl::DirectInit: + case OMPDeclareReductionInitKind::Direct: OS << " omp_priv = "; break; - case OMPDeclareReductionDecl::CopyInit: + case OMPDeclareReductionInitKind::Copy: OS << " omp_priv ()"; break; - case OMPDeclareReductionDecl::CallInit: + case OMPDeclareReductionInitKind::Call: break; } } @@ -1878,8 +2384,10 @@ void TextNodeDumper::VisitNamespaceDecl(const NamespaceDecl *D) { dumpName(D); if (D->isInline()) OS << " inline"; - if (!D->isOriginalNamespace()) - dumpDeclRef(D->getOriginalNamespace(), "original"); + if (D->isNested()) + OS << " nested"; + if (!D->isFirstDecl()) + dumpDeclRef(D->getFirstDecl(), "original"); } void TextNodeDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) { @@ -1904,6 +2412,15 @@ void TextNodeDumper::VisitTypeAliasTemplateDecl( void TextNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) { VisitRecordDecl(D); + if (const auto *Instance = D->getInstantiatedFromMemberClass()) { + OS << " instantiated_from"; + dumpPointer(Instance); + } + if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) + dumpTemplateSpecializationKind(CTSD->getSpecializationKind()); + + dumpNestedNameSpecifier(D->getQualifier()); + if (!D->isCompleteDefinition()) return; @@ -2103,6 +2620,7 @@ void TextNodeDumper::VisitUsingDecl(const UsingDecl *D) { if (D->getQualifier()) D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy()); OS << D->getDeclName(); + dumpNestedNameSpecifier(D->getQualifier()); } void TextNodeDumper::VisitUsingEnumDecl(const UsingEnumDecl *D) { @@ -2159,10 +2677,10 @@ void 
TextNodeDumper::VisitConstructorUsingShadowDecl( void TextNodeDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) { switch (D->getLanguage()) { - case LinkageSpecDecl::lang_c: + case LinkageSpecLanguageIDs::C: OS << " C"; break; - case LinkageSpecDecl::lang_cxx: + case LinkageSpecLanguageIDs::CXX: OS << " C++"; break; } @@ -2341,3 +2859,33 @@ void TextNodeDumper::VisitBlockDecl(const BlockDecl *D) { void TextNodeDumper::VisitConceptDecl(const ConceptDecl *D) { dumpName(D); } + +void TextNodeDumper::VisitCompoundStmt(const CompoundStmt *S) { + VisitStmt(S); + if (S->hasStoredFPFeatures()) + printFPOptions(S->getStoredFPFeatures()); +} + +void TextNodeDumper::VisitHLSLBufferDecl(const HLSLBufferDecl *D) { + if (D->isCBuffer()) + OS << " cbuffer"; + else + OS << " tbuffer"; + dumpName(D); +} + +void TextNodeDumper::VisitOpenACCConstructStmt(const OpenACCConstructStmt *S) { + OS << " " << S->getDirectiveKind(); +} +void TextNodeDumper::VisitOpenACCLoopConstruct(const OpenACCLoopConstruct *S) { + + if (S->isOrphanedLoopConstruct()) + OS << " <orphan>"; + else + OS << " parent: " << S->getParentComputeConstruct(); +} + +void TextNodeDumper::VisitEmbedExpr(const EmbedExpr *S) { + AddChild("begin", [=] { OS << S->getStartingElementPos(); }); + AddChild("number of elements", [=] { OS << S->getDataElementCount(); }); +} diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp index 4a2fc5219ef0..fdaab8e43459 100644 --- a/contrib/llvm-project/clang/lib/AST/Type.cpp +++ b/contrib/llvm-project/clang/lib/AST/Type.cpp @@ -18,6 +18,7 @@ #include "clang/AST/Decl.h" #include "clang/AST/DeclBase.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DependenceFlags.h" @@ -42,15 +43,16 @@ #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FoldingSet.h" -#include "llvm/ADT/None.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" +#include "llvm/TargetParser/RISCVTargetParser.h" #include <algorithm> #include <cassert> #include <cstdint> #include <cstring> +#include <optional> #include <type_traits> using namespace clang; @@ -110,6 +112,25 @@ bool QualType::isConstant(QualType T, const ASTContext &Ctx) { return T.getAddressSpace() == LangAS::opencl_constant; } +std::optional<QualType::NonConstantStorageReason> +QualType::isNonConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, + bool ExcludeDtor) { + if (!isConstant(Ctx) && !(*this)->isReferenceType()) + return NonConstantStorageReason::NonConstNonReferenceType; + if (!Ctx.getLangOpts().CPlusPlus) + return std::nullopt; + if (const CXXRecordDecl *Record = + Ctx.getBaseElementType(*this)->getAsCXXRecordDecl()) { + if (!ExcludeCtor) + return NonConstantStorageReason::NonTrivialCtor; + if (Record->hasMutableFields()) + return NonConstantStorageReason::MutableField; + if (!Record->hasTrivialDestructor() && !ExcludeDtor) + return NonConstantStorageReason::NonTrivialDtor; + } + return std::nullopt; +} + // C++ [temp.dep.type]p1: // A type is dependent if it is... 
// - an array type constructed from any dependent type or whose @@ -135,7 +156,23 @@ ArrayType::ArrayType(TypeClass tc, QualType et, QualType can, : TypeDependence::None)), ElementType(et) { ArrayTypeBits.IndexTypeQuals = tq; - ArrayTypeBits.SizeModifier = sm; + ArrayTypeBits.SizeModifier = llvm::to_underlying(sm); +} + +ConstantArrayType * +ConstantArrayType::Create(const ASTContext &Ctx, QualType ET, QualType Can, + const llvm::APInt &Sz, const Expr *SzExpr, + ArraySizeModifier SzMod, unsigned Qual) { + bool NeedsExternalSize = SzExpr != nullptr || Sz.ugt(0x0FFFFFFFFFFFFFFF) || + Sz.getBitWidth() > 0xFF; + if (!NeedsExternalSize) + return new (Ctx, alignof(ConstantArrayType)) ConstantArrayType( + ET, Can, Sz.getBitWidth(), Sz.getZExtValue(), SzMod, Qual); + + auto *SzPtr = new (Ctx, alignof(ConstantArrayType::ExternalSize)) + ConstantArrayType::ExternalSize(Sz, SzExpr); + return new (Ctx, alignof(ConstantArrayType)) + ConstantArrayType(ET, Can, SzPtr, SzMod, Qual); } unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context, @@ -158,7 +195,7 @@ unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context, if ((ElementSize >> 32) == 0 && NumElements.getBitWidth() <= 64 && (NumElements.getZExtValue() >> 32) == 0) { uint64_t TotalSize = NumElements.getZExtValue() * ElementSize; - return 64 - llvm::countLeadingZeros(TotalSize); + return llvm::bit_width(TotalSize); } // Otherwise, use APSInt to handle arbitrary sized values. @@ -173,6 +210,11 @@ unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context, return TotalSize.getActiveBits(); } +unsigned +ConstantArrayType::getNumAddressingBits(const ASTContext &Context) const { + return getNumAddressingBits(Context, getElementType(), getSize()); +} + unsigned ConstantArrayType::getMaxSizeBits(const ASTContext &Context) { unsigned Bits = Context.getTypeSize(Context.getSizeType()); @@ -187,25 +229,23 @@ unsigned ConstantArrayType::getMaxSizeBits(const ASTContext &Context) { void ConstantArrayType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ET, - const llvm::APInt &ArraySize, - const Expr *SizeExpr, ArraySizeModifier SizeMod, - unsigned TypeQuals) { + uint64_t ArraySize, const Expr *SizeExpr, + ArraySizeModifier SizeMod, unsigned TypeQuals) { ID.AddPointer(ET.getAsOpaquePtr()); - ID.AddInteger(ArraySize.getZExtValue()); - ID.AddInteger(SizeMod); + ID.AddInteger(ArraySize); + ID.AddInteger(llvm::to_underlying(SizeMod)); ID.AddInteger(TypeQuals); - ID.AddBoolean(SizeExpr != 0); + ID.AddBoolean(SizeExpr != nullptr); if (SizeExpr) SizeExpr->Profile(ID, Context, true); } -DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context, - QualType et, QualType can, +DependentSizedArrayType::DependentSizedArrayType(QualType et, QualType can, Expr *e, ArraySizeModifier sm, unsigned tq, SourceRange brackets) - : ArrayType(DependentSizedArray, et, can, sm, tq, e), - Context(Context), SizeExpr((Stmt*) e), Brackets(brackets) {} + : ArrayType(DependentSizedArray, et, can, sm, tq, e), SizeExpr((Stmt *)e), + Brackets(brackets) {} void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, @@ -214,44 +254,43 @@ void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID, unsigned TypeQuals, Expr *E) { ID.AddPointer(ET.getAsOpaquePtr()); - ID.AddInteger(SizeMod); + ID.AddInteger(llvm::to_underlying(SizeMod)); ID.AddInteger(TypeQuals); - E->Profile(ID, Context, true); + if (E) + E->Profile(ID, Context, true); } 
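// A minimal standalone sketch of the size-storage decision made by
// ConstantArrayType::Create above (plain C++, illustrative names, not the
// clang API): sizes that carry a size expression, exceed the 60-bit value
// budget (0x0FFFFFFFFFFFFFFF), or need more than 255 bits of APInt width
// spill into a trailing ExternalSize allocation; everything else is packed
// directly into the node.
#include <cstdint>

struct ArraySizeInfo {
  bool HasSizeExpr;   // stands in for SzExpr != nullptr
  unsigned BitWidth;  // stands in for Sz.getBitWidth()
  uint64_t Value;     // stands in for Sz.getZExtValue(), assuming it fits
};

inline bool needsExternalSize(const ArraySizeInfo &S) {
  return S.HasSizeExpr || S.Value > 0x0FFFFFFFFFFFFFFFull ||
         S.BitWidth > 0xFF;
}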
-DependentVectorType::DependentVectorType(const ASTContext &Context, - QualType ElementType, +DependentVectorType::DependentVectorType(QualType ElementType, QualType CanonType, Expr *SizeExpr, - SourceLocation Loc, - VectorType::VectorKind VecKind) + SourceLocation Loc, VectorKind VecKind) : Type(DependentVector, CanonType, TypeDependence::DependentInstantiation | ElementType->getDependence() | (SizeExpr ? toTypeDependence(SizeExpr->getDependence()) : TypeDependence::None)), - Context(Context), ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) { - VectorTypeBits.VecKind = VecKind; + ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) { + VectorTypeBits.VecKind = llvm::to_underlying(VecKind); } void DependentVectorType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, QualType ElementType, const Expr *SizeExpr, - VectorType::VectorKind VecKind) { + VectorKind VecKind) { ID.AddPointer(ElementType.getAsOpaquePtr()); - ID.AddInteger(VecKind); + ID.AddInteger(llvm::to_underlying(VecKind)); SizeExpr->Profile(ID, Context, true); } -DependentSizedExtVectorType::DependentSizedExtVectorType( - const ASTContext &Context, QualType ElementType, QualType can, - Expr *SizeExpr, SourceLocation loc) +DependentSizedExtVectorType::DependentSizedExtVectorType(QualType ElementType, + QualType can, + Expr *SizeExpr, + SourceLocation loc) : Type(DependentSizedExtVector, can, TypeDependence::DependentInstantiation | ElementType->getDependence() | (SizeExpr ? toTypeDependence(SizeExpr->getDependence()) : TypeDependence::None)), - Context(Context), SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) { -} + SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) {} void DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID, @@ -261,8 +300,7 @@ DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID, SizeExpr->Profile(ID, Context, true); } -DependentAddressSpaceType::DependentAddressSpaceType(const ASTContext &Context, - QualType PointeeType, +DependentAddressSpaceType::DependentAddressSpaceType(QualType PointeeType, QualType can, Expr *AddrSpaceExpr, SourceLocation loc) @@ -271,8 +309,7 @@ DependentAddressSpaceType::DependentAddressSpaceType(const ASTContext &Context, PointeeType->getDependence() | (AddrSpaceExpr ? 
toTypeDependence(AddrSpaceExpr->getDependence()) : TypeDependence::None)), - Context(Context), AddrSpaceExpr(AddrSpaceExpr), PointeeType(PointeeType), - loc(loc) {} + AddrSpaceExpr(AddrSpaceExpr), PointeeType(PointeeType), loc(loc) {} void DependentAddressSpaceType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, @@ -311,12 +348,14 @@ ConstantMatrixType::ConstantMatrixType(TypeClass tc, QualType matrixType, : MatrixType(tc, matrixType, canonType), NumRows(nRows), NumColumns(nColumns) {} -DependentSizedMatrixType::DependentSizedMatrixType( - const ASTContext &CTX, QualType ElementType, QualType CanonicalType, - Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc) +DependentSizedMatrixType::DependentSizedMatrixType(QualType ElementType, + QualType CanonicalType, + Expr *RowExpr, + Expr *ColumnExpr, + SourceLocation loc) : MatrixType(DependentSizedMatrix, ElementType, CanonicalType, RowExpr, ColumnExpr), - Context(CTX), RowExpr(RowExpr), ColumnExpr(ColumnExpr), loc(loc) {} + RowExpr(RowExpr), ColumnExpr(ColumnExpr), loc(loc) {} void DependentSizedMatrixType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &CTX, @@ -334,35 +373,54 @@ VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType, VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements, QualType canonType, VectorKind vecKind) : Type(tc, canonType, vecType->getDependence()), ElementType(vecType) { - VectorTypeBits.VecKind = vecKind; + VectorTypeBits.VecKind = llvm::to_underlying(vecKind); VectorTypeBits.NumElements = nElements; } -ExtIntType::ExtIntType(bool IsUnsigned, unsigned NumBits) - : Type(ExtInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned), +BitIntType::BitIntType(bool IsUnsigned, unsigned NumBits) + : Type(BitInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned), NumBits(NumBits) {} -DependentExtIntType::DependentExtIntType(const ASTContext &Context, - bool IsUnsigned, Expr *NumBitsExpr) - : Type(DependentExtInt, QualType{}, +DependentBitIntType::DependentBitIntType(bool IsUnsigned, Expr *NumBitsExpr) + : Type(DependentBitInt, QualType{}, toTypeDependence(NumBitsExpr->getDependence())), - Context(Context), ExprAndUnsigned(NumBitsExpr, IsUnsigned) {} + ExprAndUnsigned(NumBitsExpr, IsUnsigned) {} -bool DependentExtIntType::isUnsigned() const { +bool DependentBitIntType::isUnsigned() const { return ExprAndUnsigned.getInt(); } -clang::Expr *DependentExtIntType::getNumBitsExpr() const { +clang::Expr *DependentBitIntType::getNumBitsExpr() const { return ExprAndUnsigned.getPointer(); } -void DependentExtIntType::Profile(llvm::FoldingSetNodeID &ID, +void DependentBitIntType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, bool IsUnsigned, Expr *NumBitsExpr) { ID.AddBoolean(IsUnsigned); NumBitsExpr->Profile(ID, Context, true); } +bool BoundsAttributedType::referencesFieldDecls() const { + return llvm::any_of(dependent_decls(), + [](const TypeCoupledDeclRefInfo &Info) { + return isa<FieldDecl>(Info.getDecl()); + }); +} + +void CountAttributedType::Profile(llvm::FoldingSetNodeID &ID, + QualType WrappedTy, Expr *CountExpr, + bool CountInBytes, bool OrNull) { + ID.AddPointer(WrappedTy.getAsOpaquePtr()); + ID.AddBoolean(CountInBytes); + ID.AddBoolean(OrNull); + // We profile it as a pointer as the StmtProfiler considers parameter + // expressions on function declaration and function definition as the + // same, resulting in count expression being evaluated with ParamDecl + // not in the function scope. 
+ ID.AddPointer(CountExpr); +} + /// getArrayElementTypeNoTypeQual - If this is an array type, return the /// element type of the array, potentially with type qualifiers missing. /// This method should never be used when type qualifiers are meaningful. @@ -410,12 +468,8 @@ QualType QualType::getSingleStepDesugaredTypeImpl(QualType type, // Check that no type class has a non-trival destructor. Types are // allocated with the BumpPtrAllocator from ASTContext and therefore // their destructor is not executed. -// -// FIXME: ConstantArrayType is not trivially destructible because of its -// APInt member. It should be replaced in favor of ASTContext allocation. #define TYPE(CLASS, BASE) \ - static_assert(std::is_trivially_destructible<CLASS##Type>::value || \ - std::is_same<CLASS##Type, ConstantArrayType>::value, \ + static_assert(std::is_trivially_destructible<CLASS##Type>::value, \ #CLASS "Type should be trivially destructible!"); #include "clang/AST/TypeNodes.inc" @@ -525,6 +579,10 @@ template <> const TypedefType *Type::getAs() const { return getAsSugar<TypedefType>(this); } +template <> const UsingType *Type::getAs() const { + return getAsSugar<UsingType>(this); +} + template <> const TemplateSpecializationType *Type::getAs() const { return getAsSugar<TemplateSpecializationType>(this); } @@ -533,6 +591,14 @@ template <> const AttributedType *Type::getAs() const { return getAsSugar<AttributedType>(this); } +template <> const BoundsAttributedType *Type::getAs() const { + return getAsSugar<BoundsAttributedType>(this); +} + +template <> const CountAttributedType *Type::getAs() const { + return getAsSugar<CountAttributedType>(this); +} + /// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic /// sugar off the given type. This should produce an object of the /// same dynamic type as the canonical type. 
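// Standalone sketch of the X-macro static_assert pattern above, assuming a
// hypothetical two-entry node list (FooNode, BarNode) instead of
// clang/AST/TypeNodes.inc: every node class must be trivially destructible,
// because nodes live on the ASTContext bump allocator and their destructors
// are never run.
#include <type_traits>

struct FooNode { int Width; };
struct BarNode { double Scale; };

#define NODE(CLASS)                                                          \
  static_assert(std::is_trivially_destructible<CLASS##Node>::value,          \
                #CLASS "Node should be trivially destructible!");
NODE(Foo)
NODE(Bar)
#undef NODE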
@@ -566,6 +632,16 @@ bool Type::isStructureType() const { return false; } +bool Type::isStructureTypeWithFlexibleArrayMember() const { + const auto *RT = getAs<RecordType>(); + if (!RT) + return false; + const auto *Decl = RT->getDecl(); + if (!Decl->isStruct()) + return false; + return Decl->hasFlexibleArrayMember(); +} + bool Type::isObjCBoxableRecordType() const { if (const auto *RT = getAs<RecordType>()) return RT->getDecl()->hasAttr<ObjCBoxableAttr>(); @@ -615,6 +691,10 @@ bool Type::isScopedEnumeralType() const { return false; } +bool Type::isCountAttributedType() const { + return getAs<CountAttributedType>(); +} + const ComplexType *Type::getAsComplexIntegerType() const { if (const auto *Complex = getAs<ComplexType>()) if (Complex->getElementType()->isIntegerType()) @@ -722,8 +802,7 @@ bool Type::isObjCClassOrClassKindOfType() const { ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D, QualType can, ArrayRef<ObjCProtocolDecl *> protocols) - : Type(ObjCTypeParam, can, - can->getDependence() & ~TypeDependence::UnexpandedPack), + : Type(ObjCTypeParam, can, toSemanticDependence(can->getDependence())), OTPDecl(const_cast<ObjCTypeParamDecl *>(D)) { initialize(protocols); } @@ -821,6 +900,13 @@ QualType ObjCObjectType::stripObjCKindOfTypeAndQuals( /*isKindOf=*/false); } +ObjCInterfaceDecl *ObjCInterfaceType::getDecl() const { + ObjCInterfaceDecl *Canon = Decl->getCanonicalDecl(); + if (ObjCInterfaceDecl *Def = Canon->getDefinition()) + return Def; + return Canon; +} + const ObjCObjectPointerType *ObjCObjectPointerType::stripObjCKindOfTypeAndQuals( const ASTContext &ctx) const { if (!isKindOfType() && qual_empty()) @@ -1067,7 +1153,7 @@ public: if (exceptionChanged) { info.ExceptionSpec.Exceptions = - llvm::makeArrayRef(exceptionTypes).copy(Ctx); + llvm::ArrayRef(exceptionTypes).copy(Ctx); } } @@ -1122,6 +1208,14 @@ public: return Ctx.getDecayedType(originalType); } + QualType VisitArrayParameterType(const ArrayParameterType *T) { + QualType ArrTy = VisitConstantArrayType(T); + if (ArrTy.isNull()) + return {}; + + return Ctx.getArrayParameterType(ArrTy); + } + SUGARED_TYPE_CLASS(TypeOfExpr) SUGARED_TYPE_CLASS(TypeOf) SUGARED_TYPE_CLASS(Decltype) @@ -1160,8 +1254,9 @@ public: == T->getReplacementType().getAsOpaquePtr()) return QualType(T, 0); - return Ctx.getSubstTemplateTypeParmType(T->getReplacedParameter(), - replacementType); + return Ctx.getSubstTemplateTypeParmType(replacementType, + T->getAssociatedDecl(), + T->getIndex(), T->getPackIndex()); } // FIXME: Non-trivial to implement, but important for C++ @@ -1208,10 +1303,10 @@ public: !typeArgChanged) return QualType(T, 0); - return Ctx.getObjCObjectType(baseType, typeArgs, - llvm::makeArrayRef(T->qual_begin(), - T->getNumProtocols()), - T->isKindOfTypeAsWritten()); + return Ctx.getObjCObjectType( + baseType, typeArgs, + llvm::ArrayRef(T->qual_begin(), T->getNumProtocols()), + T->isKindOfTypeAsWritten()); } TRIVIAL_TYPE_CLASS(ObjCInterface) @@ -1363,7 +1458,7 @@ struct SubstObjCTypeArgsVisitor if (exceptionChanged) { info.ExceptionSpec.Exceptions = - llvm::makeArrayRef(exceptionTypes).copy(Ctx); + llvm::ArrayRef(exceptionTypes).copy(Ctx); } } @@ -1473,6 +1568,39 @@ struct StripObjCKindOfTypeVisitor } // namespace +bool QualType::UseExcessPrecision(const ASTContext &Ctx) { + const BuiltinType *BT = getTypePtr()->getAs<BuiltinType>(); + if (!BT) { + const VectorType *VT = getTypePtr()->getAs<VectorType>(); + if (VT) { + QualType ElementType = VT->getElementType(); + return ElementType.UseExcessPrecision(Ctx); + } + } else { + 
switch (BT->getKind()) { + case BuiltinType::Kind::Float16: { + const TargetInfo &TI = Ctx.getTargetInfo(); + if (TI.hasFloat16Type() && !TI.hasLegalHalfType() && + Ctx.getLangOpts().getFloat16ExcessPrecision() != + Ctx.getLangOpts().ExcessPrecisionKind::FPP_None) + return true; + break; + } + case BuiltinType::Kind::BFloat16: { + const TargetInfo &TI = Ctx.getTargetInfo(); + if (TI.hasBFloat16Type() && !TI.hasFullBFloat16Type() && + Ctx.getLangOpts().getBFloat16ExcessPrecision() != + Ctx.getLangOpts().ExcessPrecisionKind::FPP_None) + return true; + break; + } + default: + return false; + } + } + return false; +} + /// Substitute the given type arguments for Objective-C type /// parameters within the given type, recursively. QualType QualType::substObjCTypeArgs(ASTContext &ctx, @@ -1499,13 +1627,14 @@ QualType QualType::stripObjCKindOfType(const ASTContext &constCtx) const { } QualType QualType::getAtomicUnqualifiedType() const { - if (const auto AT = getTypePtr()->getAs<AtomicType>()) - return AT->getValueType().getUnqualifiedType(); - return getUnqualifiedType(); + QualType T = *this; + if (const auto AT = T.getTypePtr()->getAs<AtomicType>()) + T = AT->getValueType(); + return T.getUnqualifiedType(); } -Optional<ArrayRef<QualType>> Type::getObjCSubstitutions( - const DeclContext *dc) const { +std::optional<ArrayRef<QualType>> +Type::getObjCSubstitutions(const DeclContext *dc) const { // Look through method scopes. if (const auto method = dyn_cast<ObjCMethodDecl>(dc)) dc = method->getDeclContext(); @@ -1520,23 +1649,23 @@ Optional<ArrayRef<QualType>> Type::getObjCSubstitutions( // substitution to do. dcTypeParams = dcClassDecl->getTypeParamList(); if (!dcTypeParams) - return None; + return std::nullopt; } else { // If we are in neither a class nor a category, there's no // substitution to perform. dcCategoryDecl = dyn_cast<ObjCCategoryDecl>(dc); if (!dcCategoryDecl) - return None; + return std::nullopt; // If the category does not have any type parameters, there's no // substitution to do. dcTypeParams = dcCategoryDecl->getTypeParamList(); if (!dcTypeParams) - return None; + return std::nullopt; dcClassDecl = dcCategoryDecl->getClassInterface(); if (!dcClassDecl) - return None; + return std::nullopt; } assert(dcTypeParams && "No substitutions to perform"); assert(dcClassDecl && "No class context"); @@ -1885,15 +2014,26 @@ DeducedType *Type::getContainedDeducedType() const { } bool Type::hasAutoForTrailingReturnType() const { - return dyn_cast_or_null<FunctionType>( + return isa_and_nonnull<FunctionType>( GetContainedDeducedTypeVisitor(true).Visit(this)); } bool Type::hasIntegerRepresentation() const { if (const auto *VT = dyn_cast<VectorType>(CanonicalType)) return VT->getElementType()->isIntegerType(); - else - return isIntegerType(); + if (CanonicalType->isSveVLSBuiltinType()) { + const auto *VT = cast<BuiltinType>(CanonicalType); + return VT->getKind() == BuiltinType::SveBool || + (VT->getKind() >= BuiltinType::SveInt8 && + VT->getKind() <= BuiltinType::SveUint64); + } + if (CanonicalType->isRVVVLSBuiltinType()) { + const auto *VT = cast<BuiltinType>(CanonicalType); + return (VT->getKind() >= BuiltinType::RvvInt8mf8 && + VT->getKind() <= BuiltinType::RvvUint64m8); + } + + return isIntegerType(); } /// Determine whether this type is an integral type. 
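// Standalone sketch of the Float16 branch of QualType::UseExcessPrecision
// above (plain C++, illustrative names, not the clang API): _Float16
// arithmetic is evaluated in a wider format only when the target advertises
// the type, has no legal half-precision arithmetic, and the language option
// is not FPP_None.
enum class ExcessPrecisionKind { FPP_Standard, FPP_Fast, FPP_None };

struct TargetCaps {
  bool HasFloat16Type;   // stands in for TargetInfo::hasFloat16Type()
  bool HasLegalHalfType; // stands in for TargetInfo::hasLegalHalfType()
};

inline bool useFloat16ExcessPrecision(const TargetCaps &TI,
                                      ExcessPrecisionKind Opt) {
  return TI.HasFloat16Type && !TI.HasLegalHalfType &&
         Opt != ExcessPrecisionKind::FPP_None;
}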
@@ -1925,7 +2065,7 @@ bool Type::isIntegralType(const ASTContext &Ctx) const { if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) return ET->getDecl()->isComplete(); - return isExtIntType(); + return isBitIntType(); } bool Type::isIntegralOrUnscopedEnumerationType() const { @@ -1933,7 +2073,7 @@ bool Type::isIntegralOrUnscopedEnumerationType() const { return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Int128; - if (isExtIntType()) + if (isBitIntType()) return true; return isUnscopedEnumerationType(); @@ -2016,7 +2156,9 @@ bool Type::isSignedIntegerType() const { return ET->getDecl()->getIntegerType()->isSignedIntegerType(); } - if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType)) + if (const auto *IT = dyn_cast<BitIntType>(CanonicalType)) + return IT->isSigned(); + if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType)) return IT->isSigned(); return false; @@ -2033,9 +2175,10 @@ bool Type::isSignedIntegerOrEnumerationType() const { return ET->getDecl()->getIntegerType()->isSignedIntegerType(); } - if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType)) + if (const auto *IT = dyn_cast<BitIntType>(CanonicalType)) + return IT->isSigned(); + if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType)) return IT->isSigned(); - return false; } @@ -2063,7 +2206,9 @@ bool Type::isUnsignedIntegerType() const { return ET->getDecl()->getIntegerType()->isUnsignedIntegerType(); } - if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType)) + if (const auto *IT = dyn_cast<BitIntType>(CanonicalType)) + return IT->isUnsigned(); + if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType)) return IT->isUnsigned(); return false; @@ -2080,7 +2225,9 @@ bool Type::isUnsignedIntegerOrEnumerationType() const { return ET->getDecl()->getIntegerType()->isUnsignedIntegerType(); } - if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType)) + if (const auto *IT = dyn_cast<BitIntType>(CanonicalType)) + return IT->isUnsigned(); + if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType)) return IT->isUnsigned(); return false; @@ -2091,13 +2238,18 @@ bool Type::hasUnsignedIntegerRepresentation() const { return VT->getElementType()->isUnsignedIntegerOrEnumerationType(); if (const auto *VT = dyn_cast<MatrixType>(CanonicalType)) return VT->getElementType()->isUnsignedIntegerOrEnumerationType(); + if (CanonicalType->isSveVLSBuiltinType()) { + const auto *VT = cast<BuiltinType>(CanonicalType); + return VT->getKind() >= BuiltinType::SveUint8 && + VT->getKind() <= BuiltinType::SveUint64; + } return isUnsignedIntegerOrEnumerationType(); } bool Type::isFloatingType() const { if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) return BT->getKind() >= BuiltinType::Half && - BT->getKind() <= BuiltinType::Float128; + BT->getKind() <= BuiltinType::Ibm128; if (const auto *CT = dyn_cast<ComplexType>(CanonicalType)) return CT->getElementType()->isFloatingType(); return false; @@ -2106,8 +2258,9 @@ bool Type::isFloatingType() const { bool Type::hasFloatingRepresentation() const { if (const auto *VT = dyn_cast<VectorType>(CanonicalType)) return VT->getElementType()->isFloatingType(); - else - return isFloatingType(); + if (const auto *MT = dyn_cast<MatrixType>(CanonicalType)) + return MT->getElementType()->isFloatingType(); + return isFloatingType(); } bool Type::isRealFloatingType() const { @@ -2119,17 +2272,16 @@ bool Type::isRealFloatingType() const { bool Type::isRealType() const { if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) 
return BT->getKind() >= BuiltinType::Bool && - BT->getKind() <= BuiltinType::Float128; + BT->getKind() <= BuiltinType::Ibm128; if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped(); - return isExtIntType(); + return isBitIntType(); } bool Type::isArithmeticType() const { if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && - BT->getKind() <= BuiltinType::Float128 && - BT->getKind() != BuiltinType::BFloat16; + BT->getKind() <= BuiltinType::Ibm128; if (const auto *ET = dyn_cast<EnumType>(CanonicalType)) // GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2). // If a body isn't seen by the time we get here, return false. @@ -2138,7 +2290,7 @@ bool Type::isArithmeticType() const { // false for scoped enumerations since that will disable any // unwanted implicit conversions. return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete(); - return isa<ComplexType>(CanonicalType) || isExtIntType(); + return isa<ComplexType>(CanonicalType) || isBitIntType(); } Type::ScalarTypeKind Type::getScalarTypeKind() const { @@ -2167,7 +2319,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const { if (CT->getElementType()->isRealFloatingType()) return STK_FloatingComplex; return STK_IntegralComplex; - } else if (isExtIntType()) { + } else if (isBitIntType()) { return STK_Integral; } @@ -2231,6 +2383,14 @@ bool Type::isIncompleteType(NamedDecl **Def) const { *Def = Rec; return !Rec->isCompleteDefinition(); } + case InjectedClassName: { + CXXRecordDecl *Rec = cast<InjectedClassNameType>(CanonicalType)->getDecl(); + if (!Rec->isBeingDefined()) + return false; + if (Def) + *Def = Rec; + return true; + } case ConstantArray: case VariableArray: // An array is incomplete if its element type is incomplete @@ -2279,11 +2439,61 @@ bool Type::isIncompleteType(NamedDecl **Def) const { } bool Type::isSizelessBuiltinType() const { + if (isSizelessVectorType()) + return true; + + if (const BuiltinType *BT = getAs<BuiltinType>()) { + switch (BT->getKind()) { + // WebAssembly reference types +#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/WebAssemblyReferenceTypes.def" + return true; + default: + return false; + } + } + return false; +} + +bool Type::isWebAssemblyExternrefType() const { + if (const auto *BT = getAs<BuiltinType>()) + return BT->getKind() == BuiltinType::WasmExternRef; + return false; +} + +bool Type::isWebAssemblyTableType() const { + if (const auto *ATy = dyn_cast<ArrayType>(this)) + return ATy->getElementType().isWebAssemblyReferenceType(); + + if (const auto *PTy = dyn_cast<PointerType>(this)) + return PTy->getPointeeType().isWebAssemblyReferenceType(); + + return false; +} + +bool Type::isSizelessType() const { return isSizelessBuiltinType(); } + +bool Type::isSizelessVectorType() const { + return isSVESizelessBuiltinType() || isRVVSizelessBuiltinType(); +} + +bool Type::isSVESizelessBuiltinType() const { if (const BuiltinType *BT = getAs<BuiltinType>()) { switch (BT->getKind()) { // SVE Types #define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/AArch64SVEACLETypes.def" + return true; + default: + return false; + } + } + return false; +} + +bool Type::isRVVSizelessBuiltinType() const { + if (const BuiltinType *BT = getAs<BuiltinType>()) { + switch (BT->getKind()) { #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" return true; @@ -2294,9 +2504,7 
@@ bool Type::isSizelessBuiltinType() const { return false; } -bool Type::isSizelessType() const { return isSizelessBuiltinType(); } - -bool Type::isVLSTBuiltinType() const { +bool Type::isSveVLSBuiltinType() const { if (const BuiltinType *BT = getAs<BuiltinType>()) { switch (BT->getKind()) { case BuiltinType::SveInt8: @@ -2312,6 +2520,8 @@ bool Type::isVLSTBuiltinType() const { case BuiltinType::SveFloat64: case BuiltinType::SveBFloat16: case BuiltinType::SveBool: + case BuiltinType::SveBoolx2: + case BuiltinType::SveBoolx4: return true; default: return false; @@ -2320,10 +2530,22 @@ bool Type::isVLSTBuiltinType() const { return false; } +QualType Type::getSizelessVectorEltType(const ASTContext &Ctx) const { + assert(isSizelessVectorType() && "Must be sizeless vector type"); + // Currently supports SVE and RVV + if (isSVESizelessBuiltinType()) + return getSveEltType(Ctx); + + if (isRVVSizelessBuiltinType()) + return getRVVEltType(Ctx); + + llvm_unreachable("Unhandled type"); +} + QualType Type::getSveEltType(const ASTContext &Ctx) const { - assert(isVLSTBuiltinType() && "unsupported type!"); + assert(isSveVLSBuiltinType() && "unsupported type!"); - const BuiltinType *BTy = getAs<BuiltinType>(); + const BuiltinType *BTy = castAs<BuiltinType>(); if (BTy->getKind() == BuiltinType::SveBool) // Represent predicates as i8 rather than i1 to avoid any layout issues. // The type is bitcasted to a scalable predicate type when casting between @@ -2333,6 +2555,41 @@ QualType Type::getSveEltType(const ASTContext &Ctx) const { return Ctx.getBuiltinVectorTypeInfo(BTy).ElementType; } +bool Type::isRVVVLSBuiltinType() const { + if (const BuiltinType *BT = getAs<BuiltinType>()) { + switch (BT->getKind()) { +#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ + IsFP, IsBF) \ + case BuiltinType::Id: \ + return NF == 1; +#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ + case BuiltinType::Id: \ + return true; +#include "clang/Basic/RISCVVTypes.def" + default: + return false; + } + } + return false; +} + +QualType Type::getRVVEltType(const ASTContext &Ctx) const { + assert(isRVVVLSBuiltinType() && "unsupported type!"); + + const BuiltinType *BTy = castAs<BuiltinType>(); + + switch (BTy->getKind()) { +#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ + case BuiltinType::Id: \ + return Ctx.UnsignedCharTy; + default: + return Ctx.getBuiltinVectorTypeInfo(BTy).ElementType; +#include "clang/Basic/RISCVVTypes.def" + } + + llvm_unreachable("Unhandled type"); +} + bool QualType::isPODType(const ASTContext &Context) const { // C++11 has a more relaxed definition of POD. if (Context.getLangOpts().CPlusPlus11) @@ -2374,7 +2631,7 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const { case Type::MemberPointer: case Type::Vector: case Type::ExtVector: - case Type::ExtInt: + case Type::BitInt: return true; case Type::Enum: @@ -2425,11 +2682,13 @@ bool QualType::isTrivialType(const ASTContext &Context) const { return true; if (const auto *RT = CanonicalType->getAs<RecordType>()) { if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) { - // C++11 [class]p6: - // A trivial class is a class that has a default constructor, - // has no non-trivial default constructors, and is trivially - // copyable. - return ClassDecl->hasDefaultConstructor() && + // C++20 [class]p6: + // A trivial class is a class that is trivially copyable, and + // has one or more eligible default constructors such that each is + // trivial. 
+ // FIXME: We should merge this definition of triviality into + // CXXRecordDecl::isTrivial. Currently it computes the wrong thing. + return ClassDecl->hasTrivialDefaultConstructor() && !ClassDecl->hasNonTrivialDefaultConstructor() && ClassDecl->isTriviallyCopyable(); } @@ -2441,19 +2700,22 @@ bool QualType::isTrivialType(const ASTContext &Context) const { return false; } -bool QualType::isTriviallyCopyableType(const ASTContext &Context) const { - if ((*this)->isArrayType()) - return Context.getBaseElementType(*this).isTriviallyCopyableType(Context); +static bool isTriviallyCopyableTypeImpl(const QualType &type, + const ASTContext &Context, + bool IsCopyConstructible) { + if (type->isArrayType()) + return isTriviallyCopyableTypeImpl(Context.getBaseElementType(type), + Context, IsCopyConstructible); - if (hasNonTrivialObjCLifetime()) + if (type.hasNonTrivialObjCLifetime()) return false; // C++11 [basic.types]p9 - See Core 2094 // Scalar types, trivially copyable class types, arrays of such types, and // cv-qualified versions of these types are collectively - // called trivially copyable types. + // called trivially copy constructible types. - QualType CanonicalType = getCanonicalType(); + QualType CanonicalType = type.getCanonicalType(); if (CanonicalType->isDependentType()) return false; @@ -2471,16 +2733,89 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const { if (const auto *RT = CanonicalType->getAs<RecordType>()) { if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) { - if (!ClassDecl->isTriviallyCopyable()) return false; + if (IsCopyConstructible) { + return ClassDecl->isTriviallyCopyConstructible(); + } else { + return ClassDecl->isTriviallyCopyable(); + } } - return true; } - // No other types can match. return false; } +bool QualType::isTriviallyCopyableType(const ASTContext &Context) const { + return isTriviallyCopyableTypeImpl(*this, Context, + /*IsCopyConstructible=*/false); +} + +// FIXME: each call will trigger a full computation, cache the result. +bool QualType::isBitwiseCloneableType(const ASTContext &Context) const { + auto CanonicalType = getCanonicalType(); + if (CanonicalType.hasNonTrivialObjCLifetime()) + return false; + if (CanonicalType->isArrayType()) + return Context.getBaseElementType(CanonicalType) + .isBitwiseCloneableType(Context); + + if (CanonicalType->isIncompleteType()) + return false; + const auto *RD = CanonicalType->getAsRecordDecl(); // struct/union/class + if (!RD) + return true; + + // Never allow memcpy when we're adding poisoned padding bits to the struct. + // Accessing these posioned bits will trigger false alarms on + // SanitizeAddressFieldPadding etc. 
+ if (RD->mayInsertExtraPadding()) + return false; + + for (auto *const Field : RD->fields()) { + if (!Field->getType().isBitwiseCloneableType(Context)) + return false; + } + + if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { + for (auto Base : CXXRD->bases()) + if (!Base.getType().isBitwiseCloneableType(Context)) + return false; + for (auto VBase : CXXRD->vbases()) + if (!VBase.getType().isBitwiseCloneableType(Context)) + return false; + } + return true; +} + +bool QualType::isTriviallyCopyConstructibleType( + const ASTContext &Context) const { + return isTriviallyCopyableTypeImpl(*this, Context, + /*IsCopyConstructible=*/true); +} + +bool QualType::isTriviallyRelocatableType(const ASTContext &Context) const { + QualType BaseElementType = Context.getBaseElementType(*this); + + if (BaseElementType->isIncompleteType()) { + return false; + } else if (!BaseElementType->isObjectType()) { + return false; + } else if (const auto *RD = BaseElementType->getAsRecordDecl()) { + return RD->canPassInRegisters(); + } else if (BaseElementType.isTriviallyCopyableType(Context)) { + return true; + } else { + switch (isNonTrivialToPrimitiveDestructiveMove()) { + case PCK_Trivial: + return !isDestructedType(); + case PCK_ARCStrong: + return true; + default: + return false; + } + } +} + bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const { return !Context.getLangOpts().ObjCAutoRefCount && Context.getLangOpts().ObjCWeak && @@ -2499,6 +2834,19 @@ bool QualType::hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD) { return RD->hasNonTrivialToPrimitiveCopyCUnion(); } +bool QualType::isWebAssemblyReferenceType() const { + return isWebAssemblyExternrefType() || isWebAssemblyFuncrefType(); +} + +bool QualType::isWebAssemblyExternrefType() const { + return getTypePtr()->isWebAssemblyExternrefType(); +} + +bool QualType::isWebAssemblyFuncrefType() const { + return getTypePtr()->isFunctionPointerType() && + getAddressSpace() == LangAS::wasm_funcref; +} + QualType::PrimitiveDefaultInitializeKind QualType::isNonTrivialToPrimitiveDefaultInitialize() const { if (const auto *RT = @@ -2734,39 +3082,6 @@ bool Type::isStdByteType() const { return false; } -bool Type::isPromotableIntegerType() const { - if (const auto *BT = getAs<BuiltinType>()) - switch (BT->getKind()) { - case BuiltinType::Bool: - case BuiltinType::Char_S: - case BuiltinType::Char_U: - case BuiltinType::SChar: - case BuiltinType::UChar: - case BuiltinType::Short: - case BuiltinType::UShort: - case BuiltinType::WChar_S: - case BuiltinType::WChar_U: - case BuiltinType::Char8: - case BuiltinType::Char16: - case BuiltinType::Char32: - return true; - default: - return false; - } - - // Enumerated types are promotable to their compatible integer types - // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). - if (const auto *ET = getAs<EnumType>()){ - if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull() - || ET->getDecl()->isScoped()) - return false; - - return true; - } - - return false; -} - bool Type::isSpecifierType() const { // Note that this intentionally does not use the canonical type. 
switch (getTypeClass()) { @@ -2785,7 +3100,6 @@ bool Type::isSpecifierType() const { case DependentTemplateSpecialization: case ObjCInterface: case ObjCObject: - case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers return true; default: return false; @@ -2795,24 +3109,36 @@ bool Type::isSpecifierType() const { ElaboratedTypeKeyword TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) { switch (TypeSpec) { - default: return ETK_None; - case TST_typename: return ETK_Typename; - case TST_class: return ETK_Class; - case TST_struct: return ETK_Struct; - case TST_interface: return ETK_Interface; - case TST_union: return ETK_Union; - case TST_enum: return ETK_Enum; + default: + return ElaboratedTypeKeyword::None; + case TST_typename: + return ElaboratedTypeKeyword::Typename; + case TST_class: + return ElaboratedTypeKeyword::Class; + case TST_struct: + return ElaboratedTypeKeyword::Struct; + case TST_interface: + return ElaboratedTypeKeyword::Interface; + case TST_union: + return ElaboratedTypeKeyword::Union; + case TST_enum: + return ElaboratedTypeKeyword::Enum; } } TagTypeKind TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) { switch(TypeSpec) { - case TST_class: return TTK_Class; - case TST_struct: return TTK_Struct; - case TST_interface: return TTK_Interface; - case TST_union: return TTK_Union; - case TST_enum: return TTK_Enum; + case TST_class: + return TagTypeKind::Class; + case TST_struct: + return TagTypeKind::Struct; + case TST_interface: + return TagTypeKind::Interface; + case TST_union: + return TagTypeKind::Union; + case TST_enum: + return TagTypeKind::Enum; } llvm_unreachable("Type specifier is not a tag type kind."); @@ -2821,11 +3147,16 @@ TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) { ElaboratedTypeKeyword TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) { switch (Kind) { - case TTK_Class: return ETK_Class; - case TTK_Struct: return ETK_Struct; - case TTK_Interface: return ETK_Interface; - case TTK_Union: return ETK_Union; - case TTK_Enum: return ETK_Enum; + case TagTypeKind::Class: + return ElaboratedTypeKeyword::Class; + case TagTypeKind::Struct: + return ElaboratedTypeKeyword::Struct; + case TagTypeKind::Interface: + return ElaboratedTypeKeyword::Interface; + case TagTypeKind::Union: + return ElaboratedTypeKeyword::Union; + case TagTypeKind::Enum: + return ElaboratedTypeKeyword::Enum; } llvm_unreachable("Unknown tag type kind."); } @@ -2833,13 +3164,18 @@ TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) { TagTypeKind TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) { switch (Keyword) { - case ETK_Class: return TTK_Class; - case ETK_Struct: return TTK_Struct; - case ETK_Interface: return TTK_Interface; - case ETK_Union: return TTK_Union; - case ETK_Enum: return TTK_Enum; - case ETK_None: // Fall through. - case ETK_Typename: + case ElaboratedTypeKeyword::Class: + return TagTypeKind::Class; + case ElaboratedTypeKeyword::Struct: + return TagTypeKind::Struct; + case ElaboratedTypeKeyword::Interface: + return TagTypeKind::Interface; + case ElaboratedTypeKeyword::Union: + return TagTypeKind::Union; + case ElaboratedTypeKeyword::Enum: + return TagTypeKind::Enum; + case ElaboratedTypeKeyword::None: // Fall through. 
+ case ElaboratedTypeKeyword::Typename: llvm_unreachable("Elaborated type keyword is not a tag type kind."); } llvm_unreachable("Unknown elaborated type keyword."); @@ -2848,14 +3184,14 @@ TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) { bool TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) { switch (Keyword) { - case ETK_None: - case ETK_Typename: + case ElaboratedTypeKeyword::None: + case ElaboratedTypeKeyword::Typename: return false; - case ETK_Class: - case ETK_Struct: - case ETK_Interface: - case ETK_Union: - case ETK_Enum: + case ElaboratedTypeKeyword::Class: + case ElaboratedTypeKeyword::Struct: + case ElaboratedTypeKeyword::Interface: + case ElaboratedTypeKeyword::Union: + case ElaboratedTypeKeyword::Enum: return true; } llvm_unreachable("Unknown elaborated type keyword."); @@ -2863,13 +3199,20 @@ TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) { StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) { switch (Keyword) { - case ETK_None: return {}; - case ETK_Typename: return "typename"; - case ETK_Class: return "class"; - case ETK_Struct: return "struct"; - case ETK_Interface: return "__interface"; - case ETK_Union: return "union"; - case ETK_Enum: return "enum"; + case ElaboratedTypeKeyword::None: + return {}; + case ElaboratedTypeKeyword::Typename: + return "typename"; + case ElaboratedTypeKeyword::Class: + return "class"; + case ElaboratedTypeKeyword::Struct: + return "struct"; + case ElaboratedTypeKeyword::Interface: + return "__interface"; + case ElaboratedTypeKeyword::Union: + return "union"; + case ElaboratedTypeKeyword::Enum: + return "enum"; } llvm_unreachable("Unknown elaborated type keyword."); @@ -2886,7 +3229,7 @@ DependentTemplateSpecializationType::DependentTemplateSpecializationType( DependentTemplateSpecializationTypeBits.NumArgs = Args.size(); assert((!NNS || NNS->isDependent()) && "DependentTemplateSpecializatonType requires dependent qualifier"); - TemplateArgument *ArgBuffer = getArgBuffer(); + auto *ArgBuffer = const_cast<TemplateArgument *>(template_arguments().data()); for (const TemplateArgument &Arg : Args) { addDependence(toTypeDependence(Arg.getDependence() & TemplateArgumentDependence::UnexpandedPack)); @@ -2902,7 +3245,7 @@ DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *Qualifier, const IdentifierInfo *Name, ArrayRef<TemplateArgument> Args) { - ID.AddInteger(Keyword); + ID.AddInteger(llvm::to_underlying(Keyword)); ID.AddPointer(Qualifier); ID.AddPointer(Name); for (const TemplateArgument &Arg : Args) @@ -3030,6 +3373,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const { return "_Float16"; case Float128: return "__float128"; + case Ibm128: + return "__ibm128"; case WChar_S: case WChar_U: return Policy.MSWChar ? "__wchar_t" : "wchar_t"; @@ -3040,11 +3385,13 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const { case Char32: return "char32_t"; case NullPtr: - return "nullptr_t"; + return Policy.NullptrTypeInNamespace ? 
"std::nullptr_t" : "nullptr_t"; case Overload: return "<overloaded function type>"; case BoundMember: return "<bound member function type>"; + case UnresolvedTemplate: + return "<unresolved template type>"; case PseudoObject: return "<pseudo-object type>"; case Dependent: @@ -3077,8 +3424,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const { return "reserve_id_t"; case IncompleteMatrixIdx: return "<incomplete matrix index type>"; - case OMPArraySection: - return "<OpenMP array section type>"; + case ArraySection: + return "<array section type>"; case OMPArrayShaping: return "<OpenMP array shaping type>"; case OMPIterator: @@ -3099,6 +3446,14 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const { case Id: \ return Name; #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) \ + case Id: \ + return Name; +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) \ + case Id: \ + return Name; +#include "clang/Basic/AMDGPUTypes.def" } llvm_unreachable("Invalid builtin type."); @@ -3141,6 +3496,8 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) { case CC_AAPCS: return "aapcs"; case CC_AAPCS_VFP: return "aapcs-vfp"; case CC_AArch64VectorCall: return "aarch64_vector_pcs"; + case CC_AArch64SVEPCS: return "aarch64_sve_pcs"; + case CC_AMDGPUKernelCall: return "amdgpu_kernel"; case CC_IntelOclBicc: return "intel_ocl_bicc"; case CC_SpirFunction: return "spir_function"; case CC_OpenCLKernel: return "opencl_kernel"; @@ -3148,11 +3505,23 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) { case CC_SwiftAsync: return "swiftasynccall"; case CC_PreserveMost: return "preserve_most"; case CC_PreserveAll: return "preserve_all"; + case CC_M68kRTD: return "m68k_rtd"; + case CC_PreserveNone: return "preserve_none"; + // clang-format off + case CC_RISCVVectorCall: return "riscv_vector_cc"; + // clang-format on } llvm_unreachable("Invalid calling convention."); } +void FunctionProtoType::ExceptionSpecInfo::instantiate() { + assert(Type == EST_Uninstantiated); + NoexceptExpr = + cast<FunctionProtoType>(SourceTemplate->getType())->getNoexceptExpr(); + Type = EST_DependentNoexcept; +} + FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params, QualType canonical, const ExtProtoInfo &epi) @@ -3167,10 +3536,21 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params, FunctionTypeBits.Variadic = epi.Variadic; FunctionTypeBits.HasTrailingReturn = epi.HasTrailingReturn; - // Fill in the extra trailing bitfields if present. - if (hasExtraBitfields(epi.ExceptionSpec.Type)) { + if (epi.requiresFunctionProtoTypeExtraBitfields()) { + FunctionTypeBits.HasExtraBitfields = true; auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>(); - ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size(); + ExtraBits = FunctionTypeExtraBitfields(); + } else { + FunctionTypeBits.HasExtraBitfields = false; + } + + if (epi.requiresFunctionProtoTypeArmAttributes()) { + auto &ArmTypeAttrs = *getTrailingObjects<FunctionTypeArmAttributes>(); + ArmTypeAttrs = FunctionTypeArmAttributes(); + + // Also set the bit in FunctionTypeExtraBitfields + auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>(); + ExtraBits.HasArmTypeAttributes = true; } // Fill in the trailing argument array. @@ -3181,8 +3561,21 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params, argSlot[i] = params[i]; } + // Propagate the SME ACLE attributes. 
+ if (epi.AArch64SMEAttributes != SME_NormalFunction) { + auto &ArmTypeAttrs = *getTrailingObjects<FunctionTypeArmAttributes>(); + assert(epi.AArch64SMEAttributes <= SME_AttributeMask && + "Not enough bits to encode SME attributes"); + ArmTypeAttrs.AArch64SMEAttributes = epi.AArch64SMEAttributes; + } + // Fill in the exception type array if present. if (getExceptionSpecType() == EST_Dynamic) { + auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>(); + size_t NumExceptions = epi.ExceptionSpec.Exceptions.size(); + assert(NumExceptions <= 1023 && "Not enough bits to encode exceptions"); + ExtraBits.NumExceptionType = NumExceptions; + assert(hasExtraBitfields() && "missing trailing extra bitfields!"); auto *exnSlot = reinterpret_cast<QualType *>(getTrailingObjects<ExceptionType>()); @@ -3259,6 +3652,34 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params, auto &EllipsisLoc = *getTrailingObjects<SourceLocation>(); EllipsisLoc = epi.EllipsisLoc; } + + if (!epi.FunctionEffects.empty()) { + auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>(); + size_t EffectsCount = epi.FunctionEffects.size(); + ExtraBits.NumFunctionEffects = EffectsCount; + assert(ExtraBits.NumFunctionEffects == EffectsCount && + "effect bitfield overflow"); + + ArrayRef<FunctionEffect> SrcFX = epi.FunctionEffects.effects(); + auto *DestFX = getTrailingObjects<FunctionEffect>(); + std::uninitialized_copy(SrcFX.begin(), SrcFX.end(), DestFX); + + ArrayRef<EffectConditionExpr> SrcConds = epi.FunctionEffects.conditions(); + if (!SrcConds.empty()) { + ExtraBits.EffectsHaveConditions = true; + auto *DestConds = getTrailingObjects<EffectConditionExpr>(); + std::uninitialized_copy(SrcConds.begin(), SrcConds.end(), DestConds); + assert(std::any_of(SrcConds.begin(), SrcConds.end(), + [](const EffectConditionExpr &EC) { + if (const Expr *E = EC.getCondition()) + return E->isTypeDependent() || + E->isValueDependent(); + return false; + }) && + "expected a dependent expression among the conditions"); + addDependence(TypeDependence::DependentInstantiation); + } + } } bool FunctionProtoType::hasDependentExceptionSpec() const { @@ -3286,7 +3707,6 @@ CanThrowResult FunctionProtoType::canThrow() const { switch (getExceptionSpecType()) { case EST_Unparsed: case EST_Unevaluated: - case EST_Uninstantiated: llvm_unreachable("should not call this with unresolved exception specs"); case EST_DynamicNone: @@ -3308,6 +3728,7 @@ CanThrowResult FunctionProtoType::canThrow() const { return CT_Can; return CT_Dependent; + case EST_Uninstantiated: case EST_DependentNoexcept: return CT_Dependent; } @@ -3337,8 +3758,12 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result, // This is followed by an optional "consumed argument" section of the // same length as the first type sequence: // bool* - // Finally, we have the ext info and trailing return type flag: - // int bool + // This is followed by the ext info: + // int + // Finally we have a trailing return type flag (bool) + // combined with AArch64 SME Attributes, to save space: + // int + // combined with any FunctionEffects // // There is no ambiguity between the consumed arguments and an empty EH // spec because of the leading 'bool' which unambiguously indicates @@ -3371,8 +3796,20 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result, for (unsigned i = 0; i != NumParams; ++i) ID.AddInteger(epi.ExtParameterInfos[i].getOpaqueValue()); } + epi.ExtInfo.Profile(ID); - ID.AddBoolean(epi.HasTrailingReturn); 
+ + unsigned EffectCount = epi.FunctionEffects.size(); + bool HasConds = !epi.FunctionEffects.Conditions.empty(); + + ID.AddInteger((EffectCount << 3) | (HasConds << 2) | + (epi.AArch64SMEAttributes << 1) | epi.HasTrailingReturn); + + for (unsigned Idx = 0; Idx != EffectCount; ++Idx) { + ID.AddInteger(epi.FunctionEffects.Effects[Idx].toOpaqueInt32()); + if (HasConds) + ID.AddPointer(epi.FunctionEffects.Conditions[Idx].getCondition()); + } } void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, @@ -3381,15 +3818,72 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, getExtProtoInfo(), Ctx, isCanonicalUnqualified()); } +TypeCoupledDeclRefInfo::TypeCoupledDeclRefInfo(ValueDecl *D, bool Deref) + : Data(D, Deref << DerefShift) {} + +bool TypeCoupledDeclRefInfo::isDeref() const { + return Data.getInt() & DerefMask; +} +ValueDecl *TypeCoupledDeclRefInfo::getDecl() const { return Data.getPointer(); } +unsigned TypeCoupledDeclRefInfo::getInt() const { return Data.getInt(); } +void *TypeCoupledDeclRefInfo::getOpaqueValue() const { + return Data.getOpaqueValue(); +} +bool TypeCoupledDeclRefInfo::operator==( + const TypeCoupledDeclRefInfo &Other) const { + return getOpaqueValue() == Other.getOpaqueValue(); +} +void TypeCoupledDeclRefInfo::setFromOpaqueValue(void *V) { + Data.setFromOpaqueValue(V); +} + +BoundsAttributedType::BoundsAttributedType(TypeClass TC, QualType Wrapped, + QualType Canon) + : Type(TC, Canon, Wrapped->getDependence()), WrappedTy(Wrapped) {} + +CountAttributedType::CountAttributedType( + QualType Wrapped, QualType Canon, Expr *CountExpr, bool CountInBytes, + bool OrNull, ArrayRef<TypeCoupledDeclRefInfo> CoupledDecls) + : BoundsAttributedType(CountAttributed, Wrapped, Canon), + CountExpr(CountExpr) { + CountAttributedTypeBits.NumCoupledDecls = CoupledDecls.size(); + CountAttributedTypeBits.CountInBytes = CountInBytes; + CountAttributedTypeBits.OrNull = OrNull; + auto *DeclSlot = getTrailingObjects<TypeCoupledDeclRefInfo>(); + Decls = llvm::ArrayRef(DeclSlot, CoupledDecls.size()); + for (unsigned i = 0; i != CoupledDecls.size(); ++i) + DeclSlot[i] = CoupledDecls[i]; +} + TypedefType::TypedefType(TypeClass tc, const TypedefNameDecl *D, - QualType underlying, QualType can) - : Type(tc, can, underlying->getDependence()), + QualType Underlying, QualType can) + : Type(tc, can, toSemanticDependence(can->getDependence())), Decl(const_cast<TypedefNameDecl *>(D)) { assert(!isa<TypedefType>(can) && "Invalid canonical type"); + TypedefBits.hasTypeDifferentFromDecl = !Underlying.isNull(); + if (!typeMatchesDecl()) + *getTrailingObjects<QualType>() = Underlying; } QualType TypedefType::desugar() const { - return getDecl()->getUnderlyingType(); + return typeMatchesDecl() ? Decl->getUnderlyingType() + : *getTrailingObjects<QualType>(); +} + +UsingType::UsingType(const UsingShadowDecl *Found, QualType Underlying, + QualType Canon) + : Type(Using, Canon, toSemanticDependence(Canon->getDependence())), + Found(const_cast<UsingShadowDecl *>(Found)) { + UsingBits.hasTypeDifferentFromDecl = !Underlying.isNull(); + if (!typeMatchesDecl()) + *getTrailingObjects<QualType>() = Underlying; +} + +QualType UsingType::getUnderlyingType() const { + return typeMatchesDecl() + ? 
QualType( + cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(), 0) + : *getTrailingObjects<QualType>(); } QualType MacroQualifiedType::desugar() const { return getUnderlyingType(); } @@ -3406,27 +3900,58 @@ QualType MacroQualifiedType::getModifiedType() const { return Inner; } -TypeOfExprType::TypeOfExprType(Expr *E, QualType can) - : Type(TypeOfExpr, can, +TypeOfExprType::TypeOfExprType(const ASTContext &Context, Expr *E, + TypeOfKind Kind, QualType Can) + : Type(TypeOfExpr, + // We have to protect against 'Can' being invalid through its + // default argument. + Kind == TypeOfKind::Unqualified && !Can.isNull() + ? Context.getUnqualifiedArrayType(Can).getAtomicUnqualifiedType() + : Can, toTypeDependence(E->getDependence()) | (E->getType()->getDependence() & TypeDependence::VariablyModified)), - TOExpr(E) {} + TOExpr(E), Context(Context) { + TypeOfBits.Kind = static_cast<unsigned>(Kind); +} bool TypeOfExprType::isSugared() const { return !TOExpr->isTypeDependent(); } QualType TypeOfExprType::desugar() const { - if (isSugared()) - return getUnderlyingExpr()->getType(); - + if (isSugared()) { + QualType QT = getUnderlyingExpr()->getType(); + return getKind() == TypeOfKind::Unqualified + ? Context.getUnqualifiedArrayType(QT).getAtomicUnqualifiedType() + : QT; + } return QualType(this, 0); } void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID, - const ASTContext &Context, Expr *E) { + const ASTContext &Context, Expr *E, + bool IsUnqual) { E->Profile(ID, Context, true); + ID.AddBoolean(IsUnqual); +} + +TypeOfType::TypeOfType(const ASTContext &Context, QualType T, QualType Can, + TypeOfKind Kind) + : Type(TypeOf, + Kind == TypeOfKind::Unqualified + ? Context.getUnqualifiedArrayType(Can).getAtomicUnqualifiedType() + : Can, + T->getDependence()), + TOType(T), Context(Context) { + TypeOfBits.Kind = static_cast<unsigned>(Kind); +} + +QualType TypeOfType::desugar() const { + QualType QT = getUnmodifiedType(); + return getKind() == TypeOfKind::Unqualified + ? Context.getUnqualifiedArrayType(QT).getAtomicUnqualifiedType() + : QT; } DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can) @@ -3450,14 +3975,71 @@ QualType DecltypeType::desugar() const { return QualType(this, 0); } -DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E) - : DecltypeType(E, Context.DependentTy), Context(Context) {} +DependentDecltypeType::DependentDecltypeType(Expr *E, QualType UnderlyingType) + : DecltypeType(E, UnderlyingType) {} void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, Expr *E) { E->Profile(ID, Context, true); } +PackIndexingType::PackIndexingType(const ASTContext &Context, + QualType Canonical, QualType Pattern, + Expr *IndexExpr, + ArrayRef<QualType> Expansions) + : Type(PackIndexing, Canonical, + computeDependence(Pattern, IndexExpr, Expansions)), + Context(Context), Pattern(Pattern), IndexExpr(IndexExpr), + Size(Expansions.size()) { + + std::uninitialized_copy(Expansions.begin(), Expansions.end(), + getTrailingObjects<QualType>()); +} + +std::optional<unsigned> PackIndexingType::getSelectedIndex() const { + if (isInstantiationDependentType()) + return std::nullopt; + // Should only be not a constant for error recovery. 
+ ConstantExpr *CE = dyn_cast<ConstantExpr>(getIndexExpr()); + if (!CE) + return std::nullopt; + auto Index = CE->getResultAsAPSInt(); + assert(Index.isNonNegative() && "Invalid index"); + return static_cast<unsigned>(Index.getExtValue()); +} + +TypeDependence +PackIndexingType::computeDependence(QualType Pattern, Expr *IndexExpr, + ArrayRef<QualType> Expansions) { + TypeDependence IndexD = toTypeDependence(IndexExpr->getDependence()); + + TypeDependence TD = IndexD | (IndexExpr->isInstantiationDependent() + ? TypeDependence::DependentInstantiation + : TypeDependence::None); + if (Expansions.empty()) + TD |= Pattern->getDependence() & TypeDependence::DependentInstantiation; + else + for (const QualType &T : Expansions) + TD |= T->getDependence(); + + if (!(IndexD & TypeDependence::UnexpandedPack)) + TD &= ~TypeDependence::UnexpandedPack; + + // If the pattern does not contain an unexpanded pack, + // the type is still dependent, and invalid. + if (!Pattern->containsUnexpandedParameterPack()) + TD |= TypeDependence::Error | TypeDependence::DependentInstantiation; + + return TD; +} + +void PackIndexingType::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &Context, QualType Pattern, + Expr *E) { + Pattern.Profile(ID); + E->Profile(ID, Context, true); +} + UnaryTransformType::UnaryTransformType(QualType BaseType, QualType UnderlyingType, UTTKind UKind, QualType CanonicalType) @@ -3476,7 +4058,7 @@ TagType::TagType(TypeClass TC, const TagDecl *D, QualType can) decl(const_cast<TagDecl *>(D)) {} static TagDecl *getInterestingTagDecl(TagDecl *decl) { - for (auto I : decl->redecls()) { + for (auto *I : decl->redecls()) { if (I->isCompleteDefinition() || I->isBeingDefined()) return I; } @@ -3505,7 +4087,7 @@ bool RecordType::hasConstFields() const { return true; FieldTy = FieldTy.getCanonicalType(); if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) { - if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end()) + if (!llvm::is_contained(RecordTypeList, FieldRecTy)) RecordTypeList.push_back(FieldRecTy); } } @@ -3551,6 +4133,10 @@ bool AttributedType::isMSTypeSpec() const { llvm_unreachable("invalid attr kind"); } +bool AttributedType::isWebAssemblyFuncrefSpec() const { + return getAttrKind() == attr::WebAssemblyFuncref; +} + bool AttributedType::isCallingConv() const { // FIXME: Generate this with TableGen. switch (getAttrKind()) { @@ -3565,12 +4151,17 @@ bool AttributedType::isCallingConv() const { case attr::SwiftAsyncCall: case attr::VectorCall: case attr::AArch64VectorPcs: + case attr::AArch64SVEPcs: + case attr::AMDGPUKernelCall: case attr::Pascal: case attr::MSABI: case attr::SysVABI: case attr::IntelOclBicc: case attr::PreserveMost: case attr::PreserveAll: + case attr::M68kRTD: + case attr::PreserveNone: + case attr::RISCVVectorCC: return true; } llvm_unreachable("invalid attr kind"); @@ -3584,28 +4175,80 @@ IdentifierInfo *TemplateTypeParmType::getIdentifier() const { return isCanonicalUnqualified() ? 
nullptr : getDecl()->getIdentifier(); } +static const TemplateTypeParmDecl *getReplacedParameter(Decl *D, + unsigned Index) { + if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) + return TTP; + return cast<TemplateTypeParmDecl>( + getReplacedTemplateParameterList(D)->getParam(Index)); +} + +SubstTemplateTypeParmType::SubstTemplateTypeParmType( + QualType Replacement, Decl *AssociatedDecl, unsigned Index, + std::optional<unsigned> PackIndex) + : Type(SubstTemplateTypeParm, Replacement.getCanonicalType(), + Replacement->getDependence()), + AssociatedDecl(AssociatedDecl) { + SubstTemplateTypeParmTypeBits.HasNonCanonicalUnderlyingType = + Replacement != getCanonicalTypeInternal(); + if (SubstTemplateTypeParmTypeBits.HasNonCanonicalUnderlyingType) + *getTrailingObjects<QualType>() = Replacement; + + SubstTemplateTypeParmTypeBits.Index = Index; + SubstTemplateTypeParmTypeBits.PackIndex = PackIndex ? *PackIndex + 1 : 0; + assert(AssociatedDecl != nullptr); +} + +const TemplateTypeParmDecl * +SubstTemplateTypeParmType::getReplacedParameter() const { + return ::getReplacedParameter(getAssociatedDecl(), getIndex()); +} + SubstTemplateTypeParmPackType::SubstTemplateTypeParmPackType( - const TemplateTypeParmType *Param, QualType Canon, + QualType Canon, Decl *AssociatedDecl, unsigned Index, bool Final, const TemplateArgument &ArgPack) : Type(SubstTemplateTypeParmPack, Canon, TypeDependence::DependentInstantiation | TypeDependence::UnexpandedPack), - Replaced(Param), Arguments(ArgPack.pack_begin()) { + Arguments(ArgPack.pack_begin()), + AssociatedDeclAndFinal(AssociatedDecl, Final) { + SubstTemplateTypeParmPackTypeBits.Index = Index; SubstTemplateTypeParmPackTypeBits.NumArgs = ArgPack.pack_size(); + assert(AssociatedDecl != nullptr); +} + +Decl *SubstTemplateTypeParmPackType::getAssociatedDecl() const { + return AssociatedDeclAndFinal.getPointer(); +} + +bool SubstTemplateTypeParmPackType::getFinal() const { + return AssociatedDeclAndFinal.getInt(); +} + +const TemplateTypeParmDecl * +SubstTemplateTypeParmPackType::getReplacedParameter() const { + return ::getReplacedParameter(getAssociatedDecl(), getIndex()); +} + +IdentifierInfo *SubstTemplateTypeParmPackType::getIdentifier() const { + return getReplacedParameter()->getIdentifier(); } TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const { - return TemplateArgument(llvm::makeArrayRef(Arguments, getNumArgs())); + return TemplateArgument(llvm::ArrayRef(Arguments, getNumArgs())); } void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) { - Profile(ID, getReplacedParameter(), getArgumentPack()); + Profile(ID, getAssociatedDecl(), getIndex(), getFinal(), getArgumentPack()); } void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID, - const TemplateTypeParmType *Replaced, + const Decl *AssociatedDecl, + unsigned Index, bool Final, const TemplateArgument &ArgPack) { - ID.AddPointer(Replaced); + ID.AddPointer(AssociatedDecl); + ID.AddInteger(Index); + ID.AddBoolean(Final); ID.AddInteger(ArgPack.pack_size()); for (const auto &P : ArgPack.pack_elements()) ID.AddPointer(P.getAsType().getAsOpaquePtr()); @@ -3639,8 +4282,7 @@ TemplateSpecializationType::TemplateSpecializationType( : Type(TemplateSpecialization, Canon.isNull() ? QualType(this, 0) : Canon, (Canon.isNull() ? 
TypeDependence::DependentInstantiation - : Canon->getDependence() & ~(TypeDependence::VariablyModified | - TypeDependence::UnexpandedPack)) | + : toSemanticDependence(Canon->getDependence())) | (toTypeDependence(T.getDependence()) & TypeDependence::UnexpandedPack)), Template(T) { @@ -3651,7 +4293,9 @@ TemplateSpecializationType::TemplateSpecializationType( "Use DependentTemplateSpecializationType for dependent template-name"); assert((T.getKind() == TemplateName::Template || T.getKind() == TemplateName::SubstTemplateTemplateParm || - T.getKind() == TemplateName::SubstTemplateTemplateParmPack) && + T.getKind() == TemplateName::SubstTemplateTemplateParmPack || + T.getKind() == TemplateName::UsingTemplate || + T.getKind() == TemplateName::QualifiedTemplate) && "Unexpected template name for TemplateSpecializationType"); auto *TemplateArgs = reinterpret_cast<TemplateArgument *>(this + 1); @@ -3675,10 +4319,22 @@ TemplateSpecializationType::TemplateSpecializationType( // Store the aliased type if this is a type alias template specialization. if (isTypeAlias()) { auto *Begin = reinterpret_cast<TemplateArgument *>(this + 1); - *reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType; + *reinterpret_cast<QualType *>(Begin + Args.size()) = AliasedType; } } +QualType TemplateSpecializationType::getAliasedType() const { + assert(isTypeAlias() && "not a type alias template specialization"); + return *reinterpret_cast<const QualType *>(template_arguments().end()); +} + +void TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, + const ASTContext &Ctx) { + Profile(ID, Template, template_arguments(), Ctx); + if (isTypeAlias()) + getAliasedType().Profile(ID); +} + void TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, TemplateName T, @@ -3715,14 +4371,14 @@ void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID, for (auto typeArg : typeArgs) ID.AddPointer(typeArg.getAsOpaquePtr()); ID.AddInteger(protocols.size()); - for (auto proto : protocols) + for (auto *proto : protocols) ID.AddPointer(proto); ID.AddBoolean(isKindOf); } void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getBaseType(), getTypeArgsAsWritten(), - llvm::makeArrayRef(qual_begin(), getNumProtocols()), + llvm::ArrayRef(qual_begin(), getNumProtocols()), isKindOfTypeAsWritten()); } @@ -3733,13 +4389,13 @@ void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID, ID.AddPointer(OTPDecl); ID.AddPointer(CanonicalType.getAsOpaquePtr()); ID.AddInteger(protocols.size()); - for (auto proto : protocols) + for (auto *proto : protocols) ID.AddPointer(proto); } void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, getDecl(), getCanonicalTypeInternal(), - llvm::makeArrayRef(qual_begin(), getNumProtocols())); + llvm::ArrayRef(qual_begin(), getNumProtocols())); } namespace { @@ -3757,8 +4413,8 @@ public: friend CachedProperties merge(CachedProperties L, CachedProperties R) { Linkage MergedLinkage = minLinkage(L.L, R.L); - return CachedProperties(MergedLinkage, - L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType()); + return CachedProperties(MergedLinkage, L.hasLocalOrUnnamedType() || + R.hasLocalOrUnnamedType()); } }; @@ -3801,7 +4457,7 @@ public: // Compute the cached properties and then set the cache. 
CachedProperties Result = computeCachedProperties(T); T->TypeBits.CacheValid = true; - T->TypeBits.CachedLinkage = Result.getLinkage(); + T->TypeBits.CachedLinkage = llvm::to_underlying(Result.getLinkage()); T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType(); } }; @@ -3831,22 +4487,21 @@ static CachedProperties computeCachedProperties(const Type *T) { #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class: #include "clang/AST/TypeNodes.inc" // Treat instantiation-dependent types as external. - if (!T->isInstantiationDependentType()) T->dump(); assert(T->isInstantiationDependentType()); - return CachedProperties(ExternalLinkage, false); + return CachedProperties(Linkage::External, false); case Type::Auto: case Type::DeducedTemplateSpecialization: // Give non-deduced 'auto' types external linkage. We should only see them // here in error recovery. - return CachedProperties(ExternalLinkage, false); + return CachedProperties(Linkage::External, false); - case Type::ExtInt: + case Type::BitInt: case Type::Builtin: // C++ [basic.link]p8: // A type is said to have linkage if and only if: // - it is a fundamental type (3.9.1); or - return CachedProperties(ExternalLinkage, false); + return CachedProperties(Linkage::External, false); case Type::Record: case Type::Enum: { @@ -3883,6 +4538,7 @@ static CachedProperties computeCachedProperties(const Type *T) { case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: + case Type::ArrayParameter: return Cache::get(cast<ArrayType>(T)->getElementType()); case Type::Vector: case Type::ExtVector: @@ -3941,7 +4597,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) { assert(T->isInstantiationDependentType()); return LinkageInfo::external(); - case Type::ExtInt: + case Type::BitInt: case Type::Builtin: return LinkageInfo::external(); @@ -3971,6 +4627,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) { case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: + case Type::ArrayParameter: return computeTypeLinkageInfo(cast<ArrayType>(T)->getElementType()); case Type::Vector: case Type::ExtVector: @@ -4026,8 +4683,7 @@ LinkageInfo Type::getLinkageAndVisibility() const { return LinkageComputer{}.getTypeLinkageAndVisibility(this); } -Optional<NullabilityKind> -Type::getNullability(const ASTContext &Context) const { +std::optional<NullabilityKind> Type::getNullability() const { QualType Type(this, 0); while (const auto *AT = Type->getAs<AttributedType>()) { // Check whether this is an attributed type with nullability @@ -4037,7 +4693,7 @@ Type::getNullability(const ASTContext &Context) const { Type = AT->getEquivalentType(); } - return None; + return std::nullopt; } bool Type::canHaveNullability(bool ResultIfUnknown) const { @@ -4063,6 +4719,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const { case Type::TypeOfExpr: case Type::TypeOf: case Type::Decltype: + case Type::PackIndexing: case Type::UnaryTransform: case Type::TemplateTypeParm: case Type::SubstTemplateTypeParmPack: @@ -4071,16 +4728,15 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const { case Type::Auto: return ResultIfUnknown; - // Dependent template specializations can instantiate to pointer - // types unless they're known to be specializations of a class - // template. + // Dependent template specializations could instantiate to pointer types. 
case Type::TemplateSpecialization: - if (TemplateDecl *templateDecl - = cast<TemplateSpecializationType>(type.getTypePtr()) - ->getTemplateName().getAsTemplateDecl()) { - if (isa<ClassTemplateDecl>(templateDecl)) - return false; - } + // If it's a known class template, we can already check if it's nullable. + if (TemplateDecl *templateDecl = + cast<TemplateSpecializationType>(type.getTypePtr()) + ->getTemplateName() + .getAsTemplateDecl()) + if (auto *CTD = dyn_cast<ClassTemplateDecl>(templateDecl)) + return CTD->getTemplatedDecl()->hasAttr<TypeNullableAttr>(); return ResultIfUnknown; case Type::Builtin: @@ -4093,6 +4749,7 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const { #include "clang/AST/BuiltinTypes.def" return false; + case BuiltinType::UnresolvedTemplate: // Dependent types that could instantiate to a pointer type. case BuiltinType::Dependent: case BuiltinType::Overload: @@ -4125,16 +4782,31 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const { #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" case BuiltinType::BuiltinFn: case BuiltinType::NullPtr: case BuiltinType::IncompleteMatrixIdx: - case BuiltinType::OMPArraySection: + case BuiltinType::ArraySection: case BuiltinType::OMPArrayShaping: case BuiltinType::OMPIterator: return false; } llvm_unreachable("unknown builtin type"); + case Type::Record: { + const RecordDecl *RD = cast<RecordType>(type)->getDecl(); + // For template specializations, look only at primary template attributes. + // This is consistent regardless of whether the instantiation is known. + if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) + return CTSD->getSpecializedTemplate() + ->getTemplatedDecl() + ->hasAttr<TypeNullableAttr>(); + return RD->hasAttr<TypeNullableAttr>(); + } + // Non-pointer types. 
case Type::Complex: case Type::LValueReference: @@ -4152,7 +4824,6 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const { case Type::DependentAddressSpace: case Type::FunctionProto: case Type::FunctionNoProto: - case Type::Record: case Type::DeducedTemplateSpecialization: case Type::Enum: case Type::InjectedClassName: @@ -4161,15 +4832,15 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const { case Type::ObjCInterface: case Type::Atomic: case Type::Pipe: - case Type::ExtInt: - case Type::DependentExtInt: + case Type::BitInt: + case Type::DependentBitInt: + case Type::ArrayParameter: return false; } llvm_unreachable("bad type kind!"); } -llvm::Optional<NullabilityKind> -AttributedType::getImmediateNullability() const { +std::optional<NullabilityKind> AttributedType::getImmediateNullability() const { if (getAttrKind() == attr::TypeNonNull) return NullabilityKind::NonNull; if (getAttrKind() == attr::TypeNullable) @@ -4178,10 +4849,11 @@ AttributedType::getImmediateNullability() const { return NullabilityKind::Unspecified; if (getAttrKind() == attr::TypeNullableResult) return NullabilityKind::NullableResult; - return None; + return std::nullopt; } -Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) { +std::optional<NullabilityKind> +AttributedType::stripOuterNullability(QualType &T) { QualType AttrTy = T; if (auto MacroTy = dyn_cast<MacroQualifiedType>(T)) AttrTy = MacroTy->getUnderlyingType(); @@ -4193,7 +4865,7 @@ Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) { } } - return None; + return std::nullopt; } bool Type::isBlockCompatibleObjCPointerType(ASTContext &ctx) const { @@ -4255,20 +4927,13 @@ bool Type::isObjCARCImplicitlyUnretainedType() const { } bool Type::isObjCNSObjectType() const { - const Type *cur = this; - while (true) { - if (const auto *typedefType = dyn_cast<TypedefType>(cur)) - return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>(); - - // Single-step desugar until we run out of sugar. - QualType next = cur->getLocallyUnqualifiedSingleStepDesugaredType(); - if (next.getTypePtr() == cur) return false; - cur = next.getTypePtr(); - } + if (const auto *typedefType = getAs<TypedefType>()) + return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>(); + return false; } bool Type::isObjCIndependentClassType() const { - if (const auto *typedefType = dyn_cast<TypedefType>(this)) + if (const auto *typedefType = getAs<TypedefType>()) return typedefType->getDecl()->hasAttr<ObjCIndependentClassAttr>(); return false; } @@ -4392,16 +5057,20 @@ void clang::FixedPointValueToString(SmallVectorImpl<char> &Str, } AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword, - TypeDependence ExtraDependence, + TypeDependence ExtraDependence, QualType Canon, ConceptDecl *TypeConstraintConcept, ArrayRef<TemplateArgument> TypeConstraintArgs) - : DeducedType(Auto, DeducedAsType, ExtraDependence) { - AutoTypeBits.Keyword = (unsigned)Keyword; + : DeducedType(Auto, DeducedAsType, ExtraDependence, Canon) { + AutoTypeBits.Keyword = llvm::to_underlying(Keyword); AutoTypeBits.NumArgs = TypeConstraintArgs.size(); this->TypeConstraintConcept = TypeConstraintConcept; + assert(TypeConstraintConcept || AutoTypeBits.NumArgs == 0); if (TypeConstraintConcept) { - TemplateArgument *ArgBuffer = getArgBuffer(); + auto *ArgBuffer = + const_cast<TemplateArgument *>(getTypeConstraintArguments().data()); for (const TemplateArgument &Arg : TypeConstraintArgs) { + // We only syntactically depend on the constraint arguments. 
They don't + // affect the deduced type, only its validity. addDependence( toSyntacticDependence(toTypeDependence(Arg.getDependence()))); @@ -4421,3 +5090,239 @@ void AutoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context, for (const TemplateArgument &Arg : Arguments) Arg.Profile(ID, Context); } + +void AutoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) { + Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(), + getTypeConstraintConcept(), getTypeConstraintArguments()); +} + +FunctionEffect::Kind FunctionEffect::oppositeKind() const { + switch (kind()) { + case Kind::NonBlocking: + return Kind::Blocking; + case Kind::Blocking: + return Kind::NonBlocking; + case Kind::NonAllocating: + return Kind::Allocating; + case Kind::Allocating: + return Kind::NonAllocating; + case Kind::None: + return Kind::None; + } + llvm_unreachable("unknown effect kind"); +} + +StringRef FunctionEffect::name() const { + switch (kind()) { + case Kind::NonBlocking: + return "nonblocking"; + case Kind::NonAllocating: + return "nonallocating"; + case Kind::Blocking: + return "blocking"; + case Kind::Allocating: + return "allocating"; + case Kind::None: + return "(none)"; + } + llvm_unreachable("unknown effect kind"); +} + +bool FunctionEffect::canInferOnFunction(const Decl &Callee) const { + switch (kind()) { + case Kind::NonAllocating: + case Kind::NonBlocking: { + FunctionEffectsRef CalleeFX; + if (auto *FD = Callee.getAsFunction()) + CalleeFX = FD->getFunctionEffects(); + else if (auto *BD = dyn_cast<BlockDecl>(&Callee)) + CalleeFX = BD->getFunctionEffects(); + else + return false; + for (const FunctionEffectWithCondition &CalleeEC : CalleeFX) { + // nonblocking/nonallocating cannot call allocating. + if (CalleeEC.Effect.kind() == Kind::Allocating) + return false; + // nonblocking cannot call blocking. + if (kind() == Kind::NonBlocking && + CalleeEC.Effect.kind() == Kind::Blocking) + return false; + } + return true; + } + + case Kind::Allocating: + case Kind::Blocking: + return false; + + case Kind::None: + assert(0 && "canInferOnFunction with None"); + break; + } + llvm_unreachable("unknown effect kind"); +} + +bool FunctionEffect::shouldDiagnoseFunctionCall( + bool Direct, ArrayRef<FunctionEffect> CalleeFX) const { + switch (kind()) { + case Kind::NonAllocating: + case Kind::NonBlocking: { + const Kind CallerKind = kind(); + for (const auto &Effect : CalleeFX) { + const Kind EK = Effect.kind(); + // Does callee have same or stronger constraint? + if (EK == CallerKind || + (CallerKind == Kind::NonAllocating && EK == Kind::NonBlocking)) { + return false; // no diagnostic + } + } + return true; // warning + } + case Kind::Allocating: + case Kind::Blocking: + return false; + case Kind::None: + assert(0 && "shouldDiagnoseFunctionCall with None"); + break; + } + llvm_unreachable("unknown effect kind"); +} + +// ===== + +bool FunctionEffectSet::insert(const FunctionEffectWithCondition &NewEC, + Conflicts &Errs) { + FunctionEffect::Kind NewOppositeKind = NewEC.Effect.oppositeKind(); + Expr *NewCondition = NewEC.Cond.getCondition(); + + // The index at which insertion will take place; default is at end + // but we might find an earlier insertion point. 
+ unsigned InsertIdx = Effects.size(); + unsigned Idx = 0; + for (const FunctionEffectWithCondition &EC : *this) { + // Note about effects with conditions: They are considered distinct from + // those without conditions; they are potentially unique, redundant, or + // in conflict, but we can't tell which until the condition is evaluated. + if (EC.Cond.getCondition() == nullptr && NewCondition == nullptr) { + if (EC.Effect.kind() == NewEC.Effect.kind()) { + // There is no condition, and the effect kind is already present, + // so just fail to insert the new one (creating a duplicate), + // and return success. + return true; + } + + if (EC.Effect.kind() == NewOppositeKind) { + Errs.push_back({EC, NewEC}); + return false; + } + } + + if (NewEC.Effect.kind() < EC.Effect.kind() && InsertIdx > Idx) + InsertIdx = Idx; + + ++Idx; + } + + if (NewCondition || !Conditions.empty()) { + if (Conditions.empty() && !Effects.empty()) + Conditions.resize(Effects.size()); + Conditions.insert(Conditions.begin() + InsertIdx, + NewEC.Cond.getCondition()); + } + Effects.insert(Effects.begin() + InsertIdx, NewEC.Effect); + return true; +} + +bool FunctionEffectSet::insert(const FunctionEffectsRef &Set, Conflicts &Errs) { + for (const auto &Item : Set) + insert(Item, Errs); + return Errs.empty(); +} + +FunctionEffectSet FunctionEffectSet::getIntersection(FunctionEffectsRef LHS, + FunctionEffectsRef RHS) { + FunctionEffectSet Result; + FunctionEffectSet::Conflicts Errs; + + // We could use std::set_intersection but that would require expanding the + // container interface to include push_back, making it available to clients + // who might fail to maintain invariants. + auto IterA = LHS.begin(), EndA = LHS.end(); + auto IterB = RHS.begin(), EndB = RHS.end(); + + auto FEWCLess = [](const FunctionEffectWithCondition &LHS, + const FunctionEffectWithCondition &RHS) { + return std::tuple(LHS.Effect, uintptr_t(LHS.Cond.getCondition())) < + std::tuple(RHS.Effect, uintptr_t(RHS.Cond.getCondition())); + }; + + while (IterA != EndA && IterB != EndB) { + FunctionEffectWithCondition A = *IterA; + FunctionEffectWithCondition B = *IterB; + if (FEWCLess(A, B)) + ++IterA; + else if (FEWCLess(B, A)) + ++IterB; + else { + Result.insert(A, Errs); + ++IterA; + ++IterB; + } + } + + // Insertion shouldn't be able to fail; that would mean both input + // sets contained conflicts. + assert(Errs.empty() && "conflict shouldn't be possible in getIntersection"); + + return Result; +} + +FunctionEffectSet FunctionEffectSet::getUnion(FunctionEffectsRef LHS, + FunctionEffectsRef RHS, + Conflicts &Errs) { + // Optimize for either of the two sets being empty (very common). 
+ if (LHS.empty()) + return FunctionEffectSet(RHS); + + FunctionEffectSet Combined(LHS); + Combined.insert(RHS, Errs); + return Combined; +} + +LLVM_DUMP_METHOD void FunctionEffectsRef::dump(llvm::raw_ostream &OS) const { + OS << "Effects{"; + bool First = true; + for (const auto &CFE : *this) { + if (!First) + OS << ", "; + else + First = false; + OS << CFE.Effect.name(); + if (Expr *E = CFE.Cond.getCondition()) { + OS << '('; + E->dump(); + OS << ')'; + } + } + OS << "}"; +} + +LLVM_DUMP_METHOD void FunctionEffectSet::dump(llvm::raw_ostream &OS) const { + FunctionEffectsRef(*this).dump(OS); +} + +FunctionEffectsRef +FunctionEffectsRef::create(ArrayRef<FunctionEffect> FX, + ArrayRef<EffectConditionExpr> Conds) { + assert(std::is_sorted(FX.begin(), FX.end()) && "effects should be sorted"); + assert((Conds.empty() || Conds.size() == FX.size()) && + "effects size should match conditions size"); + return FunctionEffectsRef(FX, Conds); +} + +std::string FunctionEffectWithCondition::description() const { + std::string Result(Effect.name().str()); + if (Cond.getCondition() != nullptr) + Result += "(expr)"; + return Result; +} diff --git a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp index 16d953b4bece..33e6ccbadc12 100644 --- a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp +++ b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp @@ -11,9 +11,10 @@ //===----------------------------------------------------------------------===// #include "clang/AST/TypeLoc.h" -#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ASTConcept.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" +#include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/TemplateBase.h" @@ -194,15 +195,21 @@ SourceLocation TypeLoc::getBeginLoc() const { while (true) { switch (Cur.getTypeLocClass()) { case Elaborated: - LeftMost = Cur; - break; + if (Cur.getLocalSourceRange().getBegin().isValid()) { + LeftMost = Cur; + break; + } + Cur = Cur.getNextTypeLoc(); + if (Cur.isNull()) + break; + continue; case FunctionProto: if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr() ->hasTrailingReturn()) { LeftMost = Cur; break; } - LLVM_FALLTHROUGH; + [[fallthrough]]; case FunctionNoProto: case ConstantArray: case DependentSizedArray: @@ -240,6 +247,8 @@ SourceLocation TypeLoc::getEndLoc() const { case IncompleteArray: case VariableArray: case FunctionNoProto: + // The innermost type with suffix syntax always determines the end of the + // type. Last = Cur; break; case FunctionProto: @@ -248,12 +257,19 @@ SourceLocation TypeLoc::getEndLoc() const { else Last = Cur; break; + case ObjCObjectPointer: + // `id` and `id<...>` have no star location. + if (Cur.castAs<ObjCObjectPointerTypeLoc>().getStarLoc().isInvalid()) + break; + [[fallthrough]]; case Pointer: case BlockPointer: case MemberPointer: case LValueReference: case RValueReference: case PackExpansion: + // Types with prefix syntax only determine the end of the type if there + // is no suffix type. 
if (!Last) Last = Cur; break; @@ -351,6 +367,7 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const { case BuiltinType::LongDouble: case BuiltinType::Float16: case BuiltinType::Float128: + case BuiltinType::Ibm128: case BuiltinType::ShortAccum: case BuiltinType::Accum: case BuiltinType::LongAccum: @@ -382,6 +399,7 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const { case BuiltinType::NullPtr: case BuiltinType::Overload: case BuiltinType::Dependent: + case BuiltinType::UnresolvedTemplate: case BuiltinType::BoundMember: case BuiltinType::UnknownAny: case BuiltinType::ARCUnbridgedCast: @@ -408,9 +426,13 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const { #include "clang/Basic/PPCTypes.def" #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: #include "clang/Basic/RISCVVTypes.def" +#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/WebAssemblyReferenceTypes.def" +#define AMDGPU_TYPE(Name, Id, SingletonId) case BuiltinType::Id: +#include "clang/Basic/AMDGPUTypes.def" case BuiltinType::BuiltinFn: case BuiltinType::IncompleteMatrixIdx: - case BuiltinType::OMPArraySection: + case BuiltinType::ArraySection: case BuiltinType::OMPArrayShaping: case BuiltinType::OMPIterator: return TST_unspecified; @@ -497,12 +519,20 @@ SourceRange AttributedTypeLoc::getLocalSourceRange() const { return getAttr() ? getAttr()->getRange() : SourceRange(); } +SourceRange CountAttributedTypeLoc::getLocalSourceRange() const { + return getCountExpr() ? getCountExpr()->getSourceRange() : SourceRange(); +} + +SourceRange BTFTagAttributedTypeLoc::getLocalSourceRange() const { + return getAttr() ? getAttr()->getRange() : SourceRange(); +} + void TypeOfTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) { TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo> ::initializeLocal(Context, Loc); - this->getLocalData()->UnderlyingTInfo = Context.getTrivialTypeSourceInfo( - getUnderlyingType(), Loc); + this->getLocalData()->UnmodifiedTInfo = + Context.getTrivialTypeSourceInfo(getUnmodifiedType(), Loc); } void UnaryTransformTypeLoc::initializeLocal(ASTContext &Context, @@ -516,6 +546,8 @@ void UnaryTransformTypeLoc::initializeLocal(ASTContext &Context, void ElaboratedTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) { + if (isEmpty()) + return; setElaboratedKeywordLoc(Loc); NestedNameSpecifierLocBuilder Builder; Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc); @@ -546,17 +578,14 @@ DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context, setTemplateNameLoc(Loc); setLAngleLoc(Loc); setRAngleLoc(Loc); - TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(), - getTypePtr()->getArgs(), - getArgInfos(), Loc); + TemplateSpecializationTypeLoc::initializeArgLocs( + Context, getTypePtr()->template_arguments(), getArgInfos(), Loc); } -void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context, - unsigned NumArgs, - const TemplateArgument *Args, - TemplateArgumentLocInfo *ArgInfos, - SourceLocation Loc) { - for (unsigned i = 0, e = NumArgs; i != e; ++i) { +void TemplateSpecializationTypeLoc::initializeArgLocs( + ASTContext &Context, ArrayRef<TemplateArgument> Args, + TemplateArgumentLocInfo *ArgInfos, SourceLocation Loc) { + for (unsigned i = 0, e = Args.size(); i != e; ++i) { switch (Args[i].getKind()) { case TemplateArgument::Null: llvm_unreachable("Impossible TemplateArgument"); @@ -564,6 +593,7 @@ void 
TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context, case TemplateArgument::Integral: case TemplateArgument::Declaration: case TemplateArgument::NullPtr: + case TemplateArgument::StructuralValue: ArgInfos[i] = TemplateArgumentLocInfo(); break; @@ -600,25 +630,43 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context, } } -DeclarationNameInfo AutoTypeLoc::getConceptNameInfo() const { - return DeclarationNameInfo(getNamedConcept()->getDeclName(), - getLocalData()->ConceptNameLoc); +// Builds a ConceptReference where all locations point at the same token, +// for use in trivial TypeSourceInfo for constrained AutoType +static ConceptReference *createTrivialConceptReference(ASTContext &Context, + SourceLocation Loc, + const AutoType *AT) { + DeclarationNameInfo DNI = + DeclarationNameInfo(AT->getTypeConstraintConcept()->getDeclName(), Loc, + AT->getTypeConstraintConcept()->getDeclName()); + unsigned size = AT->getTypeConstraintArguments().size(); + TemplateArgumentLocInfo *TALI = new TemplateArgumentLocInfo[size]; + TemplateSpecializationTypeLoc::initializeArgLocs( + Context, AT->getTypeConstraintArguments(), TALI, Loc); + TemplateArgumentListInfo TAListI; + for (unsigned i = 0; i < size; ++i) { + TAListI.addArgument( + TemplateArgumentLoc(AT->getTypeConstraintArguments()[i], + TALI[i])); // TemplateArgumentLocInfo() + } + + auto *ConceptRef = ConceptReference::Create( + Context, NestedNameSpecifierLoc{}, Loc, DNI, nullptr, + AT->getTypeConstraintConcept(), + ASTTemplateArgumentListInfo::Create(Context, TAListI)); + delete[] TALI; + return ConceptRef; } void AutoTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) { - setNestedNameSpecifierLoc(NestedNameSpecifierLoc()); - setTemplateKWLoc(Loc); - setConceptNameLoc(Loc); - setFoundDecl(nullptr); - setRAngleLoc(Loc); - setLAngleLoc(Loc); - TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(), - getTypePtr()->getArgs(), - getArgInfos(), Loc); + setRParenLoc(Loc); setNameLoc(Loc); + setConceptReference(nullptr); + if (getTypePtr()->isConstrained()) { + setConceptReference( + createTrivialConceptReference(Context, Loc, getTypePtr())); + } } - namespace { class GetContainedAutoTypeLocVisitor : @@ -672,6 +720,10 @@ namespace { return Visit(T.getModifiedLoc()); } + TypeLoc VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc T) { + return Visit(T.getWrappedLoc()); + } + TypeLoc VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc T) { return Visit(T.getInnerLoc()); } @@ -693,3 +745,12 @@ AutoTypeLoc TypeLoc::getContainedAutoTypeLoc() const { return AutoTypeLoc(); return Res.getAs<AutoTypeLoc>(); } + +SourceLocation TypeLoc::getTemplateKeywordLoc() const { + if (const auto TSTL = getAsAdjusted<TemplateSpecializationTypeLoc>()) + return TSTL.getTemplateKeywordLoc(); + if (const auto DTSTL = + getAsAdjusted<DependentTemplateSpecializationTypeLoc>()) + return DTSTL.getTemplateKeywordLoc(); + return SourceLocation(); +} diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp index 5de22f76f458..ffec3ef9d226 100644 --- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp +++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp @@ -22,6 +22,7 @@ #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" +#include "clang/AST/TextNodeDumper.h" #include "clang/AST/Type.h" #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/ExceptionSpecificationType.h" @@ -32,6 +33,7 @@ #include 
"clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" @@ -47,94 +49,103 @@ using namespace clang; namespace { - /// RAII object that enables printing of the ARC __strong lifetime - /// qualifier. - class IncludeStrongLifetimeRAII { - PrintingPolicy &Policy; - bool Old; - - public: - explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy) - : Policy(Policy), Old(Policy.SuppressStrongLifetime) { - if (!Policy.SuppressLifetimeQualifiers) - Policy.SuppressStrongLifetime = false; - } +/// RAII object that enables printing of the ARC __strong lifetime +/// qualifier. +class IncludeStrongLifetimeRAII { + PrintingPolicy &Policy; + bool Old; + +public: + explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy) + : Policy(Policy), Old(Policy.SuppressStrongLifetime) { + if (!Policy.SuppressLifetimeQualifiers) + Policy.SuppressStrongLifetime = false; + } - ~IncludeStrongLifetimeRAII() { - Policy.SuppressStrongLifetime = Old; - } - }; + ~IncludeStrongLifetimeRAII() { Policy.SuppressStrongLifetime = Old; } +}; - class ParamPolicyRAII { - PrintingPolicy &Policy; - bool Old; +class ParamPolicyRAII { + PrintingPolicy &Policy; + bool Old; - public: - explicit ParamPolicyRAII(PrintingPolicy &Policy) - : Policy(Policy), Old(Policy.SuppressSpecifiers) { - Policy.SuppressSpecifiers = false; - } +public: + explicit ParamPolicyRAII(PrintingPolicy &Policy) + : Policy(Policy), Old(Policy.SuppressSpecifiers) { + Policy.SuppressSpecifiers = false; + } - ~ParamPolicyRAII() { - Policy.SuppressSpecifiers = Old; - } - }; + ~ParamPolicyRAII() { Policy.SuppressSpecifiers = Old; } +}; - class ElaboratedTypePolicyRAII { - PrintingPolicy &Policy; - bool SuppressTagKeyword; - bool SuppressScope; +class DefaultTemplateArgsPolicyRAII { + PrintingPolicy &Policy; + bool Old; - public: - explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) { - SuppressTagKeyword = Policy.SuppressTagKeyword; - SuppressScope = Policy.SuppressScope; - Policy.SuppressTagKeyword = true; - Policy.SuppressScope = true; - } +public: + explicit DefaultTemplateArgsPolicyRAII(PrintingPolicy &Policy) + : Policy(Policy), Old(Policy.SuppressDefaultTemplateArgs) { + Policy.SuppressDefaultTemplateArgs = false; + } - ~ElaboratedTypePolicyRAII() { - Policy.SuppressTagKeyword = SuppressTagKeyword; - Policy.SuppressScope = SuppressScope; - } - }; - - class TypePrinter { - PrintingPolicy Policy; - unsigned Indentation; - bool HasEmptyPlaceHolder = false; - bool InsideCCAttribute = false; - - public: - explicit TypePrinter(const PrintingPolicy &Policy, unsigned Indentation = 0) - : Policy(Policy), Indentation(Indentation) {} - - void print(const Type *ty, Qualifiers qs, raw_ostream &OS, - StringRef PlaceHolder); - void print(QualType T, raw_ostream &OS, StringRef PlaceHolder); - - static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier); - void spaceBeforePlaceHolder(raw_ostream &OS); - void printTypeSpec(NamedDecl *D, raw_ostream &OS); - void printTemplateId(const TemplateSpecializationType *T, raw_ostream &OS, - bool FullyQualify); - - void printBefore(QualType T, raw_ostream &OS); - void printAfter(QualType T, raw_ostream &OS); - void AppendScope(DeclContext *DC, raw_ostream &OS, - DeclarationName NameInScope); - void printTag(TagDecl *T, raw_ostream &OS); - void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS); + 
~DefaultTemplateArgsPolicyRAII() { Policy.SuppressDefaultTemplateArgs = Old; } +}; + +class ElaboratedTypePolicyRAII { + PrintingPolicy &Policy; + bool SuppressTagKeyword; + bool SuppressScope; + +public: + explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) { + SuppressTagKeyword = Policy.SuppressTagKeyword; + SuppressScope = Policy.SuppressScope; + Policy.SuppressTagKeyword = true; + Policy.SuppressScope = true; + } + + ~ElaboratedTypePolicyRAII() { + Policy.SuppressTagKeyword = SuppressTagKeyword; + Policy.SuppressScope = SuppressScope; + } +}; + +class TypePrinter { + PrintingPolicy Policy; + unsigned Indentation; + bool HasEmptyPlaceHolder = false; + bool InsideCCAttribute = false; + +public: + explicit TypePrinter(const PrintingPolicy &Policy, unsigned Indentation = 0) + : Policy(Policy), Indentation(Indentation) {} + + void print(const Type *ty, Qualifiers qs, raw_ostream &OS, + StringRef PlaceHolder); + void print(QualType T, raw_ostream &OS, StringRef PlaceHolder); + + static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier); + void spaceBeforePlaceHolder(raw_ostream &OS); + void printTypeSpec(NamedDecl *D, raw_ostream &OS); + void printTemplateId(const TemplateSpecializationType *T, raw_ostream &OS, + bool FullyQualify); + + void printBefore(QualType T, raw_ostream &OS); + void printAfter(QualType T, raw_ostream &OS); + void AppendScope(DeclContext *DC, raw_ostream &OS, + DeclarationName NameInScope); + void printTag(TagDecl *T, raw_ostream &OS); + void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS); #define ABSTRACT_TYPE(CLASS, PARENT) -#define TYPE(CLASS, PARENT) \ - void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \ - void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS); +#define TYPE(CLASS, PARENT) \ + void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \ + void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS); #include "clang/AST/TypeNodes.inc" - private: - void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS); - void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS); - }; +private: + void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS); + void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS); +}; } // namespace @@ -184,7 +195,7 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, raw_ostream &OS, return; } - SaveAndRestore<bool> PHVal(HasEmptyPlaceHolder, PlaceHolder.empty()); + SaveAndRestore PHVal(HasEmptyPlaceHolder, PlaceHolder.empty()); printBefore(T, Quals, OS); OS << PlaceHolder; @@ -200,17 +211,19 @@ bool TypePrinter::canPrefixQualifiers(const Type *T, // type expands to a simple string. 
bool CanPrefixQualifiers = false; NeedARCStrongQualifier = false; - Type::TypeClass TC = T->getTypeClass(); + const Type *UnderlyingType = T; if (const auto *AT = dyn_cast<AutoType>(T)) - TC = AT->desugar()->getTypeClass(); + UnderlyingType = AT->desugar().getTypePtr(); if (const auto *Subst = dyn_cast<SubstTemplateTypeParmType>(T)) - TC = Subst->getReplacementType()->getTypeClass(); + UnderlyingType = Subst->getReplacementType().getTypePtr(); + Type::TypeClass TC = UnderlyingType->getTypeClass(); switch (TC) { case Type::Auto: case Type::Builtin: case Type::Complex: case Type::UnresolvedUsing: + case Type::Using: case Type::Typedef: case Type::TypeOfExpr: case Type::TypeOf: @@ -231,8 +244,9 @@ bool TypePrinter::canPrefixQualifiers(const Type *T, case Type::ObjCInterface: case Type::Atomic: case Type::Pipe: - case Type::ExtInt: - case Type::DependentExtInt: + case Type::BitInt: + case Type::DependentBitInt: + case Type::BTFTagAttributed: CanPrefixQualifiers = true; break; @@ -241,15 +255,20 @@ bool TypePrinter::canPrefixQualifiers(const Type *T, T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType(); break; - case Type::ConstantArray: - case Type::IncompleteArray: case Type::VariableArray: case Type::DependentSizedArray: NeedARCStrongQualifier = true; - LLVM_FALLTHROUGH; + [[fallthrough]]; + + case Type::ConstantArray: + case Type::IncompleteArray: + return canPrefixQualifiers( + cast<ArrayType>(UnderlyingType)->getElementType().getTypePtr(), + NeedARCStrongQualifier); case Type::Adjusted: case Type::Decayed: + case Type::ArrayParameter: case Type::Pointer: case Type::BlockPointer: case Type::LValueReference: @@ -268,14 +287,21 @@ bool TypePrinter::canPrefixQualifiers(const Type *T, case Type::PackExpansion: case Type::SubstTemplateTypeParm: case Type::MacroQualified: + case Type::CountAttributed: CanPrefixQualifiers = false; break; case Type::Attributed: { // We still want to print the address_space before the type if it is an // address_space attribute. - const auto *AttrTy = cast<AttributedType>(T); + const auto *AttrTy = cast<AttributedType>(UnderlyingType); CanPrefixQualifiers = AttrTy->getAttrKind() == attr::AddressSpace; + break; + } + case Type::PackIndexing: { + return canPrefixQualifiers( + cast<PackIndexingType>(UnderlyingType)->getPattern().getTypePtr(), + NeedARCStrongQualifier); } } @@ -300,7 +326,7 @@ void TypePrinter::printBefore(const Type *T,Qualifiers Quals, raw_ostream &OS) { if (Policy.SuppressSpecifiers && T->isSpecifierType()) return; - SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder); + SaveAndRestore PrevPHIsEmpty(HasEmptyPlaceHolder); // Print qualifiers as appropriate. @@ -377,7 +403,7 @@ void TypePrinter::printComplexAfter(const ComplexType *T, raw_ostream &OS) { void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); printBefore(T->getPointeeType(), OS); // Handle things like 'int (*A)[4];' correctly. // FIXME: this should include vectors, but vectors use attributes I guess. @@ -388,7 +414,7 @@ void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) { void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); // Handle things like 'int (*A)[4];' correctly. 
// FIXME: this should include vectors, but vectors use attributes I guess. if (isa<ArrayType>(T->getPointeeType())) @@ -398,14 +424,14 @@ void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) { void TypePrinter::printBlockPointerBefore(const BlockPointerType *T, raw_ostream &OS) { - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); printBefore(T->getPointeeType(), OS); OS << '^'; } void TypePrinter::printBlockPointerAfter(const BlockPointerType *T, raw_ostream &OS) { - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); printAfter(T->getPointeeType(), OS); } @@ -420,7 +446,7 @@ static QualType skipTopLevelReferences(QualType T) { void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten()); printBefore(Inner, OS); // Handle things like 'int (&A)[4];' correctly. @@ -433,7 +459,7 @@ void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T, void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten()); // Handle things like 'int (&A)[4];' correctly. // FIXME: this should include vectors, but vectors use attributes I guess. @@ -445,7 +471,7 @@ void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T, void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten()); printBefore(Inner, OS); // Handle things like 'int (&&A)[4];' correctly. @@ -458,7 +484,7 @@ void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T, void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten()); // Handle things like 'int (&&A)[4];' correctly. // FIXME: this should include vectors, but vectors use attributes I guess. @@ -470,7 +496,7 @@ void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T, void TypePrinter::printMemberPointerBefore(const MemberPointerType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); printBefore(T->getPointeeType(), OS); // Handle things like 'int (Cls::*A)[4];' correctly. // FIXME: this should include vectors, but vectors use attributes I guess. 
@@ -487,7 +513,7 @@ void TypePrinter::printMemberPointerBefore(const MemberPointerType *T, void TypePrinter::printMemberPointerAfter(const MemberPointerType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); // Handle things like 'int (Cls::*A)[4];' correctly. // FIXME: this should include vectors, but vectors use attributes I guess. if (isa<ArrayType>(T->getPointeeType())) @@ -498,7 +524,6 @@ void TypePrinter::printMemberPointerAfter(const MemberPointerType *T, void TypePrinter::printConstantArrayBefore(const ConstantArrayType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); printBefore(T->getElementType(), OS); } @@ -511,17 +536,16 @@ void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T, OS << ' '; } - if (T->getSizeModifier() == ArrayType::Static) + if (T->getSizeModifier() == ArraySizeModifier::Static) OS << "static "; - OS << T->getSize().getZExtValue() << ']'; + OS << T->getZExtSize() << ']'; printAfter(T->getElementType(), OS); } void TypePrinter::printIncompleteArrayBefore(const IncompleteArrayType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); printBefore(T->getElementType(), OS); } @@ -534,7 +558,6 @@ void TypePrinter::printIncompleteArrayAfter(const IncompleteArrayType *T, void TypePrinter::printVariableArrayBefore(const VariableArrayType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); printBefore(T->getElementType(), OS); } @@ -546,9 +569,9 @@ void TypePrinter::printVariableArrayAfter(const VariableArrayType *T, OS << ' '; } - if (T->getSizeModifier() == VariableArrayType::Static) + if (T->getSizeModifier() == ArraySizeModifier::Static) OS << "static "; - else if (T->getSizeModifier() == VariableArrayType::Star) + else if (T->getSizeModifier() == ArraySizeModifier::Star) OS << '*'; if (T->getSizeExpr()) @@ -573,6 +596,16 @@ void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) { printAdjustedBefore(T, OS); } +void TypePrinter::printArrayParameterAfter(const ArrayParameterType *T, + raw_ostream &OS) { + printConstantArrayAfter(T, OS); +} + +void TypePrinter::printArrayParameterBefore(const ArrayParameterType *T, + raw_ostream &OS) { + printConstantArrayBefore(T, OS); +} + void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) { printAdjustedAfter(T, OS); } @@ -581,7 +614,6 @@ void TypePrinter::printDependentSizedArrayBefore( const DependentSizedArrayType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); printBefore(T->getElementType(), OS); } @@ -612,43 +644,52 @@ void TypePrinter::printDependentAddressSpaceAfter( void TypePrinter::printDependentSizedExtVectorBefore( const DependentSizedExtVectorType *T, raw_ostream &OS) { + if (Policy.UseHLSLTypes) + OS << "vector<"; printBefore(T->getElementType(), OS); } void TypePrinter::printDependentSizedExtVectorAfter( const DependentSizedExtVectorType *T, raw_ostream &OS) { - OS << " __attribute__((ext_vector_type("; - if (T->getSizeExpr()) - T->getSizeExpr()->printPretty(OS, nullptr, Policy); - OS << ")))"; + if (Policy.UseHLSLTypes) { + OS << ", "; + if (T->getSizeExpr()) + T->getSizeExpr()->printPretty(OS, nullptr, Policy); + OS << 
">"; + } else { + OS << " __attribute__((ext_vector_type("; + if (T->getSizeExpr()) + T->getSizeExpr()->printPretty(OS, nullptr, Policy); + OS << ")))"; + } printAfter(T->getElementType(), OS); } void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) { switch (T->getVectorKind()) { - case VectorType::AltiVecPixel: + case VectorKind::AltiVecPixel: OS << "__vector __pixel "; break; - case VectorType::AltiVecBool: + case VectorKind::AltiVecBool: OS << "__vector __bool "; printBefore(T->getElementType(), OS); break; - case VectorType::AltiVecVector: + case VectorKind::AltiVecVector: OS << "__vector "; printBefore(T->getElementType(), OS); break; - case VectorType::NeonVector: + case VectorKind::Neon: OS << "__attribute__((neon_vector_type(" << T->getNumElements() << "))) "; printBefore(T->getElementType(), OS); break; - case VectorType::NeonPolyVector: + case VectorKind::NeonPoly: OS << "__attribute__((neon_polyvector_type(" << T->getNumElements() << "))) "; printBefore(T->getElementType(), OS); break; - case VectorType::GenericVector: { + case VectorKind::Generic: { // FIXME: We prefer to print the size directly here, but have no way // to get the size of the type. OS << "__attribute__((__vector_size__(" @@ -659,13 +700,13 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) { printBefore(T->getElementType(), OS); break; } - case VectorType::SveFixedLengthDataVector: - case VectorType::SveFixedLengthPredicateVector: + case VectorKind::SveFixedLengthData: + case VectorKind::SveFixedLengthPredicate: // FIXME: We prefer to print the size directly here, but have no way // to get the size of the type. OS << "__attribute__((__arm_sve_vector_bits__("; - if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) + if (T->getVectorKind() == VectorKind::SveFixedLengthPredicate) // Predicates take a bit per byte of the vector size, multiply by 8 to // get the number of bits passed to the attribute. OS << T->getNumElements() * 8; @@ -677,6 +718,21 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) { // Multiply by 8 for the number of bits. OS << ") * 8))) "; printBefore(T->getElementType(), OS); + break; + case VectorKind::RVVFixedLengthData: + case VectorKind::RVVFixedLengthMask: + // FIXME: We prefer to print the size directly here, but have no way + // to get the size of the type. + OS << "__attribute__((__riscv_rvv_vector_bits__("; + + OS << T->getNumElements(); + + OS << " * sizeof("; + print(T->getElementType(), OS, StringRef()); + // Multiply by 8 for the number of bits. 
+ OS << ") * 8))) "; + printBefore(T->getElementType(), OS); + break; } } @@ -687,32 +743,32 @@ void TypePrinter::printVectorAfter(const VectorType *T, raw_ostream &OS) { void TypePrinter::printDependentVectorBefore( const DependentVectorType *T, raw_ostream &OS) { switch (T->getVectorKind()) { - case VectorType::AltiVecPixel: + case VectorKind::AltiVecPixel: OS << "__vector __pixel "; break; - case VectorType::AltiVecBool: + case VectorKind::AltiVecBool: OS << "__vector __bool "; printBefore(T->getElementType(), OS); break; - case VectorType::AltiVecVector: + case VectorKind::AltiVecVector: OS << "__vector "; printBefore(T->getElementType(), OS); break; - case VectorType::NeonVector: + case VectorKind::Neon: OS << "__attribute__((neon_vector_type("; if (T->getSizeExpr()) T->getSizeExpr()->printPretty(OS, nullptr, Policy); OS << "))) "; printBefore(T->getElementType(), OS); break; - case VectorType::NeonPolyVector: + case VectorKind::NeonPoly: OS << "__attribute__((neon_polyvector_type("; if (T->getSizeExpr()) T->getSizeExpr()->printPretty(OS, nullptr, Policy); OS << "))) "; printBefore(T->getElementType(), OS); break; - case VectorType::GenericVector: { + case VectorKind::Generic: { // FIXME: We prefer to print the size directly here, but have no way // to get the size of the type. OS << "__attribute__((__vector_size__("; @@ -724,14 +780,14 @@ void TypePrinter::printDependentVectorBefore( printBefore(T->getElementType(), OS); break; } - case VectorType::SveFixedLengthDataVector: - case VectorType::SveFixedLengthPredicateVector: + case VectorKind::SveFixedLengthData: + case VectorKind::SveFixedLengthPredicate: // FIXME: We prefer to print the size directly here, but have no way // to get the size of the type. OS << "__attribute__((__arm_sve_vector_bits__("; if (T->getSizeExpr()) { T->getSizeExpr()->printPretty(OS, nullptr, Policy); - if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) + if (T->getVectorKind() == VectorKind::SveFixedLengthPredicate) // Predicates take a bit per byte of the vector size, multiply by 8 to // get the number of bits passed to the attribute. OS << " * 8"; @@ -742,6 +798,22 @@ void TypePrinter::printDependentVectorBefore( } OS << "))) "; printBefore(T->getElementType(), OS); + break; + case VectorKind::RVVFixedLengthData: + case VectorKind::RVVFixedLengthMask: + // FIXME: We prefer to print the size directly here, but have no way + // to get the size of the type. + OS << "__attribute__((__riscv_rvv_vector_bits__("; + if (T->getSizeExpr()) { + T->getSizeExpr()->printPretty(OS, nullptr, Policy); + OS << " * sizeof("; + print(T->getElementType(), OS, StringRef()); + // Multiply by 8 for the number of bits. 
+ OS << ") * 8"; + } + OS << "))) "; + printBefore(T->getElementType(), OS); + break; } } @@ -752,14 +824,23 @@ void TypePrinter::printDependentVectorAfter( void TypePrinter::printExtVectorBefore(const ExtVectorType *T, raw_ostream &OS) { + if (Policy.UseHLSLTypes) + OS << "vector<"; printBefore(T->getElementType(), OS); } void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) { printAfter(T->getElementType(), OS); - OS << " __attribute__((ext_vector_type("; - OS << T->getNumElements(); - OS << ")))"; + + if (Policy.UseHLSLTypes) { + OS << ", "; + OS << T->getNumElements(); + OS << ">"; + } else { + OS << " __attribute__((ext_vector_type("; + OS << T->getNumElements(); + OS << ")))"; + } } void TypePrinter::printConstantMatrixBefore(const ConstantMatrixType *T, @@ -833,7 +914,7 @@ void TypePrinter::printFunctionProtoBefore(const FunctionProtoType *T, OS << '('; } else { // If needed for precedence reasons, wrap the inner part in grouping parens. - SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false); + SaveAndRestore PrevPHIsEmpty(HasEmptyPlaceHolder, false); printBefore(T->getReturnType(), OS); if (!PrevPHIsEmpty.get()) OS << '('; @@ -861,7 +942,7 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T, // If needed for precedence reasons, wrap the inner part in grouping parens. if (!HasEmptyPlaceHolder) OS << ')'; - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); OS << '('; { @@ -893,6 +974,28 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T, OS << ')'; FunctionType::ExtInfo Info = T->getExtInfo(); + unsigned SMEBits = T->getAArch64SMEAttributes(); + + if (SMEBits & FunctionType::SME_PStateSMCompatibleMask) + OS << " __arm_streaming_compatible"; + if (SMEBits & FunctionType::SME_PStateSMEnabledMask) + OS << " __arm_streaming"; + if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves) + OS << " __arm_preserves(\"za\")"; + if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In) + OS << " __arm_in(\"za\")"; + if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out) + OS << " __arm_out(\"za\")"; + if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut) + OS << " __arm_inout(\"za\")"; + if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Preserves) + OS << " __arm_preserves(\"zt0\")"; + if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_In) + OS << " __arm_in(\"zt0\")"; + if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Out) + OS << " __arm_out(\"zt0\")"; + if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_InOut) + OS << " __arm_inout(\"zt0\")"; printFunctionAfter(Info, OS); @@ -913,6 +1016,17 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T, } T->printExceptionSpecification(OS, Policy); + const FunctionEffectsRef FX = T->getFunctionEffects(); + for (const auto &CFE : FX) { + OS << " __attribute__((" << CFE.Effect.name(); + if (const Expr *E = CFE.Cond.getCondition()) { + OS << '('; + E->printPretty(OS, nullptr, Policy); + OS << ')'; + } + OS << "))"; + } + if (T->hasTrailingReturn()) { OS << " -> "; print(T->getReturnType(), OS, StringRef()); @@ -957,6 +1071,12 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info, case CC_AArch64VectorCall: OS << "__attribute__((aarch64_vector_pcs))"; break; + case CC_AArch64SVEPCS: + OS << "__attribute__((aarch64_sve_pcs))"; + break; + case CC_AMDGPUKernelCall: + OS << 
"__attribute__((amdgpu_kernel))"; + break; case CC_IntelOclBicc: OS << " __attribute__((intel_ocl_bicc))"; break; @@ -985,6 +1105,15 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info, case CC_PreserveAll: OS << " __attribute__((preserve_all))"; break; + case CC_M68kRTD: + OS << " __attribute__((m68k_rtd))"; + break; + case CC_PreserveNone: + OS << " __attribute__((preserve_none))"; + break; + case CC_RISCVVectorCall: + OS << "__attribute__((riscv_vector_cc))"; + break; } } @@ -1006,7 +1135,7 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info, void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T, raw_ostream &OS) { // If needed for precedence reasons, wrap the inner part in grouping parens. - SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false); + SaveAndRestore PrevPHIsEmpty(HasEmptyPlaceHolder, false); printBefore(T->getReturnType(), OS); if (!PrevPHIsEmpty.get()) OS << '('; @@ -1017,7 +1146,7 @@ void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T, // If needed for precedence reasons, wrap the inner part in grouping parens. if (!HasEmptyPlaceHolder) OS << ')'; - SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false); + SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false); OS << "()"; printFunctionAfter(T->getExtInfo(), OS); @@ -1045,6 +1174,21 @@ void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T, void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T, raw_ostream &OS) {} +void TypePrinter::printUsingBefore(const UsingType *T, raw_ostream &OS) { + // After `namespace b { using a::X }`, is the type X within B a::X or b::X? + // + // - b::X is more formally correct given the UsingType model + // - b::X makes sense if "re-exporting" a symbol in a new namespace + // - a::X makes sense if "importing" a symbol for convenience + // + // The "importing" use seems much more common, so we print a::X. + // This could be a policy option, but the right choice seems to rest more + // with the intent of the code than the caller. + printTypeSpec(T->getFoundDecl()->getUnderlyingDecl(), OS); +} + +void TypePrinter::printUsingAfter(const UsingType *T, raw_ostream &OS) {} + void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) { printTypeSpec(T->getDecl(), OS); } @@ -1068,7 +1212,8 @@ void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) {} void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T, raw_ostream &OS) { - OS << "typeof "; + OS << (T->getKind() == TypeOfKind::Unqualified ? "typeof_unqual " + : "typeof "); if (T->getUnderlyingExpr()) T->getUnderlyingExpr()->printPretty(OS, nullptr, Policy); spaceBeforePlaceHolder(OS); @@ -1078,8 +1223,9 @@ void TypePrinter::printTypeOfExprAfter(const TypeOfExprType *T, raw_ostream &OS) {} void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) { - OS << "typeof("; - print(T->getUnderlyingType(), OS, StringRef()); + OS << (T->getKind() == TypeOfKind::Unqualified ? 
"typeof_unqual(" + : "typeof("); + print(T->getUnmodifiedType(), OS, StringRef()); OS << ')'; spaceBeforePlaceHolder(OS); } @@ -1094,35 +1240,40 @@ void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) { spaceBeforePlaceHolder(OS); } +void TypePrinter::printPackIndexingBefore(const PackIndexingType *T, + raw_ostream &OS) { + if (T->hasSelectedType()) { + OS << T->getSelectedType(); + } else { + OS << T->getPattern() << "...["; + T->getIndexExpr()->printPretty(OS, nullptr, Policy); + OS << "]"; + } + spaceBeforePlaceHolder(OS); +} + +void TypePrinter::printPackIndexingAfter(const PackIndexingType *T, + raw_ostream &OS) {} + void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) {} void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - switch (T->getUTTKind()) { - case UnaryTransformType::EnumUnderlyingType: - OS << "__underlying_type("; - print(T->getBaseType(), OS, StringRef()); - OS << ')'; - spaceBeforePlaceHolder(OS); - return; - } - - printBefore(T->getBaseType(), OS); + static llvm::DenseMap<int, const char *> Transformation = {{ +#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \ + {UnaryTransformType::Enum, "__" #Trait}, +#include "clang/Basic/TransformTypeTraits.def" + }}; + OS << Transformation[T->getUTTKind()] << '('; + print(T->getBaseType(), OS, StringRef()); + OS << ')'; + spaceBeforePlaceHolder(OS); } void TypePrinter::printUnaryTransformAfter(const UnaryTransformType *T, - raw_ostream &OS) { - IncludeStrongLifetimeRAII Strong(Policy); - - switch (T->getUTTKind()) { - case UnaryTransformType::EnumUnderlyingType: - return; - } - - printAfter(T->getBaseType(), OS); -} + raw_ostream &OS) {} void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) { // If the type has been deduced, do not print 'auto'. @@ -1199,26 +1350,26 @@ void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) { void TypePrinter::printPipeAfter(const PipeType *T, raw_ostream &OS) {} -void TypePrinter::printExtIntBefore(const ExtIntType *T, raw_ostream &OS) { +void TypePrinter::printBitIntBefore(const BitIntType *T, raw_ostream &OS) { if (T->isUnsigned()) OS << "unsigned "; - OS << "_ExtInt(" << T->getNumBits() << ")"; + OS << "_BitInt(" << T->getNumBits() << ")"; spaceBeforePlaceHolder(OS); } -void TypePrinter::printExtIntAfter(const ExtIntType *T, raw_ostream &OS) {} +void TypePrinter::printBitIntAfter(const BitIntType *T, raw_ostream &OS) {} -void TypePrinter::printDependentExtIntBefore(const DependentExtIntType *T, +void TypePrinter::printDependentBitIntBefore(const DependentBitIntType *T, raw_ostream &OS) { if (T->isUnsigned()) OS << "unsigned "; - OS << "_ExtInt("; + OS << "_BitInt("; T->getNumBitsExpr()->printPretty(OS, nullptr, Policy); OS << ")"; spaceBeforePlaceHolder(OS); } -void TypePrinter::printDependentExtIntAfter(const DependentExtIntType *T, +void TypePrinter::printDependentBitIntAfter(const DependentBitIntType *T, raw_ostream &OS) {} /// Appends the given scope to the end of a string. 
@@ -1328,11 +1479,20 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) { if (PLoc.isValid()) { OS << " at "; StringRef File = PLoc.getFilename(); + llvm::SmallString<1024> WrittenFile(File); if (auto *Callbacks = Policy.Callbacks) - OS << Callbacks->remapPath(File); - else - OS << File; - OS << ':' << PLoc.getLine() << ':' << PLoc.getColumn(); + WrittenFile = Callbacks->remapPath(File); + // Fix inconsistent path separator created by + // clang::DirectoryLookup::LookupFile when the file path is relative + // path. + llvm::sys::path::Style Style = + llvm::sys::path::is_absolute(WrittenFile) + ? llvm::sys::path::Style::native + : (Policy.MSVCFormatting + ? llvm::sys::path::Style::windows_backslash + : llvm::sys::path::Style::posix); + llvm::sys::path::native(WrittenFile, Style); + OS << WrittenFile << ':' << PLoc.getLine() << ':' << PLoc.getColumn(); } } @@ -1341,21 +1501,18 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) { // If this is a class template specialization, print the template // arguments. - if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(D)) { - ArrayRef<TemplateArgument> Args; - TypeSourceInfo *TAW = Spec->getTypeAsWritten(); - if (!Policy.PrintCanonicalTypes && TAW) { - const TemplateSpecializationType *TST = - cast<TemplateSpecializationType>(TAW->getType()); - Args = TST->template_arguments(); - } else { - const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); - Args = TemplateArgs.asArray(); - } + if (auto *S = dyn_cast<ClassTemplateSpecializationDecl>(D)) { + const TemplateParameterList *TParams = + S->getSpecializedTemplate()->getTemplateParameters(); + const ASTTemplateArgumentListInfo *TArgAsWritten = + S->getTemplateArgsAsWritten(); IncludeStrongLifetimeRAII Strong(Policy); - printTemplateArgumentList( - OS, Args, Policy, - Spec->getSpecializedTemplate()->getTemplateParameters()); + if (TArgAsWritten && !Policy.PrintCanonicalTypes) + printTemplateArgumentList(OS, TArgAsWritten->arguments(), Policy, + TParams); + else + printTemplateArgumentList(OS, S->getTemplateArgs().asArray(), Policy, + TParams); } spaceBeforePlaceHolder(OS); @@ -1363,9 +1520,11 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) { void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) { // Print the preferred name if we have one for this type. - for (const auto *PNA : T->getDecl()->specific_attrs<PreferredNameAttr>()) { - if (declaresSameEntity(PNA->getTypedefType()->getAsCXXRecordDecl(), - T->getDecl())) { + if (Policy.UsePreferredNames) { + for (const auto *PNA : T->getDecl()->specific_attrs<PreferredNameAttr>()) { + if (!declaresSameEntity(PNA->getTypedefType()->getAsCXXRecordDecl(), + T->getDecl())) + continue; // Find the outermost typedef or alias template. QualType T = PNA->getTypedefType(); while (true) { @@ -1399,7 +1558,8 @@ void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T, } OS << "auto"; } else if (IdentifierInfo *Id = T->getIdentifier()) - OS << Id->getName(); + OS << (Policy.CleanUglifiedParameters ? 
Id->deuglifiedName() + : Id->getName()); else OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex(); @@ -1427,14 +1587,27 @@ void TypePrinter::printSubstTemplateTypeParmPackBefore( const SubstTemplateTypeParmPackType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - printTemplateTypeParmBefore(T->getReplacedParameter(), OS); + if (const TemplateTypeParmDecl *D = T->getReplacedParameter()) { + if (D && D->isImplicit()) { + if (auto *TC = D->getTypeConstraint()) { + TC->print(OS, Policy); + OS << ' '; + } + OS << "auto"; + } else if (IdentifierInfo *Id = D->getIdentifier()) + OS << (Policy.CleanUglifiedParameters ? Id->deuglifiedName() + : Id->getName()); + else + OS << "type-parameter-" << D->getDepth() << '-' << D->getIndex(); + + spaceBeforePlaceHolder(OS); + } } void TypePrinter::printSubstTemplateTypeParmPackAfter( const SubstTemplateTypeParmPackType *T, raw_ostream &OS) { IncludeStrongLifetimeRAII Strong(Policy); - printTemplateTypeParmAfter(T->getReplacedParameter(), OS); } void TypePrinter::printTemplateId(const TemplateSpecializationType *T, @@ -1442,17 +1615,19 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T, IncludeStrongLifetimeRAII Strong(Policy); TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl(); + // FIXME: Null TD never exercised in test suite. if (FullyQualify && TD) { if (!Policy.SuppressScope) AppendScope(TD->getDeclContext(), OS, TD->getDeclName()); - IdentifierInfo *II = TD->getIdentifier(); - OS << II->getName(); + OS << TD->getName(); } else { - T->getTemplateName().print(OS, Policy); + T->getTemplateName().print(OS, Policy, TemplateName::Qualified::None); } - printTemplateArgumentList(OS, T->template_arguments(), Policy); + DefaultTemplateArgsPolicyRAII TemplateArgs(Policy); + const TemplateParameterList *TPL = TD ? TD->getTemplateParameters() : nullptr; + printTemplateArgumentList(OS, T->template_arguments(), Policy, TPL); spaceBeforePlaceHolder(OS); } @@ -1492,13 +1667,29 @@ void TypePrinter::printElaboratedBefore(const ElaboratedType *T, return; } + if (Policy.SuppressElaboration) { + printBefore(T->getNamedType(), OS); + return; + } + // The tag definition will take care of these. 
if (!Policy.IncludeTagDefinition) { OS << TypeWithKeyword::getKeywordName(T->getKeyword()); - if (T->getKeyword() != ETK_None) + if (T->getKeyword() != ElaboratedTypeKeyword::None) OS << " "; NestedNameSpecifier *Qualifier = T->getQualifier(); + if (!Policy.SuppressTagKeyword && Policy.SuppressScope && + !Policy.SuppressUnwrittenScope) { + bool OldTagKeyword = Policy.SuppressTagKeyword; + bool OldSupressScope = Policy.SuppressScope; + Policy.SuppressTagKeyword = true; + Policy.SuppressScope = false; + printBefore(T->getNamedType(), OS); + Policy.SuppressTagKeyword = OldTagKeyword; + Policy.SuppressScope = OldSupressScope; + return; + } if (Qualifier) Qualifier->print(OS, Policy); } @@ -1511,6 +1702,12 @@ void TypePrinter::printElaboratedAfter(const ElaboratedType *T, raw_ostream &OS) { if (Policy.IncludeTagDefinition && T->getOwnedTagDecl()) return; + + if (Policy.SuppressElaboration) { + printAfter(T->getNamedType(), OS); + return; + } + ElaboratedTypePolicyRAII PolicyRAII(Policy); printAfter(T->getNamedType(), OS); } @@ -1534,7 +1731,7 @@ void TypePrinter::printParenAfter(const ParenType *T, raw_ostream &OS) { void TypePrinter::printDependentNameBefore(const DependentNameType *T, raw_ostream &OS) { OS << TypeWithKeyword::getKeywordName(T->getKeyword()); - if (T->getKeyword() != ETK_None) + if (T->getKeyword() != ElaboratedTypeKeyword::None) OS << " "; T->getQualifier()->print(OS, Policy); @@ -1551,7 +1748,7 @@ void TypePrinter::printDependentTemplateSpecializationBefore( IncludeStrongLifetimeRAII Strong(Policy); OS << TypeWithKeyword::getKeywordName(T->getKeyword()); - if (T->getKeyword() != ETK_None) + if (T->getKeyword() != ElaboratedTypeKeyword::None) OS << " "; if (T->getQualifier()) @@ -1575,6 +1772,37 @@ void TypePrinter::printPackExpansionAfter(const PackExpansionType *T, OS << "..."; } +static void printCountAttributedImpl(const CountAttributedType *T, + raw_ostream &OS, + const PrintingPolicy &Policy) { + OS << ' '; + if (T->isCountInBytes() && T->isOrNull()) + OS << "__sized_by_or_null("; + else if (T->isCountInBytes()) + OS << "__sized_by("; + else if (T->isOrNull()) + OS << "__counted_by_or_null("; + else + OS << "__counted_by("; + if (T->getCountExpr()) + T->getCountExpr()->printPretty(OS, nullptr, Policy); + OS << ')'; +} + +void TypePrinter::printCountAttributedBefore(const CountAttributedType *T, + raw_ostream &OS) { + printBefore(T->desugar(), OS); + if (!T->isArrayType()) + printCountAttributedImpl(T, OS, Policy); +} + +void TypePrinter::printCountAttributedAfter(const CountAttributedType *T, + raw_ostream &OS) { + printAfter(T->desugar(), OS); + if (T->isArrayType()) + printCountAttributedImpl(T, OS, Policy); +} + void TypePrinter::printAttributedBefore(const AttributedType *T, raw_ostream &OS) { // FIXME: Generate this with TableGen. @@ -1603,6 +1831,9 @@ void TypePrinter::printAttributedBefore(const AttributedType *T, spaceBeforePlaceHolder(OS); } + if (T->isWebAssemblyFuncrefSpec()) + OS << "__funcref"; + // Print nullability type specifiers. if (T->getImmediateNullability()) { if (T->getAttrKind() == attr::TypeNonNull) @@ -1630,14 +1861,14 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, // If this is a calling convention attribute, don't print the implicit CC from // the modified type. 
- SaveAndRestore<bool> MaybeSuppressCC(InsideCCAttribute, T->isCallingConv()); + SaveAndRestore MaybeSuppressCC(InsideCCAttribute, T->isCallingConv()); printAfter(T->getModifiedType(), OS); // Some attributes are printed as qualifiers before the type, so we have // nothing left to do. - if (T->getAttrKind() == attr::ObjCKindOf || - T->isMSTypeSpec() || T->getImmediateNullability()) + if (T->getAttrKind() == attr::ObjCKindOf || T->isMSTypeSpec() || + T->getImmediateNullability() || T->isWebAssemblyFuncrefSpec()) return; // Don't print the inert __unsafe_unretained attribute at all. @@ -1661,6 +1892,24 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, if (T->getAttrKind() == attr::AddressSpace) return; + if (T->getAttrKind() == attr::AnnotateType) { + // FIXME: Print the attribute arguments once we have a way to retrieve these + // here. For the meantime, we just print `[[clang::annotate_type(...)]]` + // without the arguments so that we know at least that we had _some_ + // annotation on the type. + OS << " [[clang::annotate_type(...)]]"; + return; + } + + if (T->getAttrKind() == attr::ArmStreaming) { + OS << "__arm_streaming"; + return; + } + if (T->getAttrKind() == attr::ArmStreamingCompatible) { + OS << "__arm_streaming_compatible"; + return; + } + OS << " __attribute__(("; switch (T->getAttrKind()) { #define TYPE_ATTR(NAME) @@ -1669,6 +1918,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, #include "clang/Basic/AttrList.inc" llvm_unreachable("non-type attribute attached to type"); + case attr::BTFTypeTag: + llvm_unreachable("BTFTypeTag attribute handled separately"); + case attr::OpenCLPrivateAddressSpace: case attr::OpenCLGlobalAddressSpace: case attr::OpenCLGlobalDeviceAddressSpace: @@ -1676,10 +1928,15 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, case attr::OpenCLLocalAddressSpace: case attr::OpenCLConstantAddressSpace: case attr::OpenCLGenericAddressSpace: + case attr::HLSLGroupSharedAddressSpace: // FIXME: Update printAttributedBefore to print these once we generate // AttributedType nodes for them. 
break; + case attr::CountedBy: + case attr::CountedByOrNull: + case attr::SizedBy: + case attr::SizedByOrNull: case attr::LifetimeBound: case attr::TypeNonNull: case attr::TypeNullable: @@ -1695,6 +1952,18 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, case attr::UPtr: case attr::AddressSpace: case attr::CmseNSCall: + case attr::AnnotateType: + case attr::WebAssemblyFuncref: + case attr::ArmStreaming: + case attr::ArmStreamingCompatible: + case attr::ArmIn: + case attr::ArmOut: + case attr::ArmInOut: + case attr::ArmPreserves: + case attr::NonBlocking: + case attr::NonAllocating: + case attr::Blocking: + case attr::Allocating: llvm_unreachable("This attribute should have been handled already"); case attr::NSReturnsRetained: @@ -1726,6 +1995,8 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, break; } case attr::AArch64VectorPcs: OS << "aarch64_vector_pcs"; break; + case attr::AArch64SVEPcs: OS << "aarch64_sve_pcs"; break; + case attr::AMDGPUKernelCall: OS << "amdgpu_kernel"; break; case attr::IntelOclBicc: OS << "inteloclbicc"; break; case attr::PreserveMost: OS << "preserve_most"; @@ -1734,6 +2005,15 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, case attr::PreserveAll: OS << "preserve_all"; break; + case attr::M68kRTD: + OS << "m68k_rtd"; + break; + case attr::PreserveNone: + OS << "preserve_none"; + break; + case attr::RISCVVectorCC: + OS << "riscv_vector_cc"; + break; case attr::NoDeref: OS << "noderef"; break; @@ -1743,10 +2023,25 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, case attr::ArmMveStrictPolymorphism: OS << "__clang_arm_mve_strict_polymorphism"; break; + + // Nothing to print for this attribute. + case attr::HLSLParamModifier: + break; } OS << "))"; } +void TypePrinter::printBTFTagAttributedBefore(const BTFTagAttributedType *T, + raw_ostream &OS) { + printBefore(T->getWrappedType(), OS); + OS << " __attribute__((btf_type_tag(\"" << T->getAttr()->getBTFTypeTag() << "\")))"; +} + +void TypePrinter::printBTFTagAttributedAfter(const BTFTagAttributedType *T, + raw_ostream &OS) { + printAfter(T->getWrappedType(), OS); +} + void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T, raw_ostream &OS) { OS << T->getDecl()->getName(); @@ -1921,11 +2216,11 @@ static bool isSubstitutedType(ASTContext &Ctx, QualType T, QualType Pattern, if (!isSubstitutedTemplateArgument(Ctx, Template, PTST->getTemplateName(), Args, Depth)) return false; - if (TemplateArgs.size() != PTST->getNumArgs()) + if (TemplateArgs.size() != PTST->template_arguments().size()) return false; for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) - if (!isSubstitutedTemplateArgument(Ctx, TemplateArgs[I], PTST->getArg(I), - Args, Depth)) + if (!isSubstitutedTemplateArgument( + Ctx, TemplateArgs[I], PTST->template_arguments()[I], Args, Depth)) return false; return true; } @@ -1934,6 +2229,36 @@ static bool isSubstitutedType(ASTContext &Ctx, QualType T, QualType Pattern, return false; } +/// Evaluates the expression template argument 'Pattern' and returns true +/// if 'Arg' evaluates to the same result. 
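The helper introduced by the comment above (and defined just below it) lets an expression-form default argument match a substituted argument by value. A rough illustration of the observable effect; the concrete template and printed strings are my own example, not output from the patch:

  // The pattern 'sizeof(int)' is a non-value-dependent constant expression; on
  // a target where it evaluates to 4 it matches the integral argument 4, so the
  // trailing argument may be elided when SuppressDefaultTemplateArgs is in effect.
  template <int N = sizeof(int)> struct Fixed {};
  Fixed<4> F;   // may print as "Fixed<>" in diagnostics under that policy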
+static bool templateArgumentExpressionsEqual(ASTContext const &Ctx, + TemplateArgument const &Pattern, + TemplateArgument const &Arg) { + if (Pattern.getKind() != TemplateArgument::Expression) + return false; + + // Can't evaluate value-dependent expressions so bail early + Expr const *pattern_expr = Pattern.getAsExpr(); + if (pattern_expr->isValueDependent() || + !pattern_expr->isIntegerConstantExpr(Ctx)) + return false; + + if (Arg.getKind() == TemplateArgument::Integral) + return llvm::APSInt::isSameValue(pattern_expr->EvaluateKnownConstInt(Ctx), + Arg.getAsIntegral()); + + if (Arg.getKind() == TemplateArgument::Expression) { + Expr const *args_expr = Arg.getAsExpr(); + if (args_expr->isValueDependent() || !args_expr->isIntegerConstantExpr(Ctx)) + return false; + + return llvm::APSInt::isSameValue(args_expr->EvaluateKnownConstInt(Ctx), + pattern_expr->EvaluateKnownConstInt(Ctx)); + } + + return false; +} + static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg, TemplateArgument Pattern, ArrayRef<TemplateArgument> Args, @@ -1952,6 +2277,9 @@ static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg, } } + if (templateArgumentExpressionsEqual(Ctx, Pattern, Arg)) + return true; + if (Arg.getKind() != Pattern.getKind()) return false; @@ -1971,9 +2299,7 @@ static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg, return false; } -/// Make a best-effort determination of whether the type T can be produced by -/// substituting Args into the default argument of Param. -static bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg, +bool clang::isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg, const NamedDecl *Param, ArrayRef<TemplateArgument> Args, unsigned Depth) { @@ -1983,42 +2309,38 @@ static bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg, if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Param)) { return TTPD->hasDefaultArgument() && - isSubstitutedTemplateArgument(Ctx, Arg, TTPD->getDefaultArgument(), - Args, Depth); + isSubstitutedTemplateArgument( + Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth); } else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) { return TTPD->hasDefaultArgument() && isSubstitutedTemplateArgument( Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth); } else if (auto *NTTPD = dyn_cast<NonTypeTemplateParmDecl>(Param)) { return NTTPD->hasDefaultArgument() && - isSubstitutedTemplateArgument(Ctx, Arg, NTTPD->getDefaultArgument(), - Args, Depth); + isSubstitutedTemplateArgument( + Ctx, Arg, NTTPD->getDefaultArgument().getArgument(), Args, + Depth); } return false; } template <typename TA> -static void printTo(raw_ostream &OS, ArrayRef<TA> Args, - const PrintingPolicy &Policy, bool SkipBrackets, - const TemplateParameterList *TPL, bool IsPack, - unsigned ParmIndex) { +static void +printTo(raw_ostream &OS, ArrayRef<TA> Args, const PrintingPolicy &Policy, + const TemplateParameterList *TPL, bool IsPack, unsigned ParmIndex) { // Drop trailing template arguments that match default arguments. 
if (TPL && Policy.SuppressDefaultTemplateArgs && !Policy.PrintCanonicalTypes && !Args.empty() && !IsPack && Args.size() <= TPL->size()) { - ASTContext &Ctx = TPL->getParam(0)->getASTContext(); llvm::SmallVector<TemplateArgument, 8> OrigArgs; for (const TA &A : Args) OrigArgs.push_back(getArgument(A)); - while (!Args.empty() && - isSubstitutedDefaultArgument(Ctx, getArgument(Args.back()), - TPL->getParam(Args.size() - 1), - OrigArgs, TPL->getDepth())) + while (!Args.empty() && getArgument(Args.back()).getIsDefaulted()) Args = Args.drop_back(); } const char *Comma = Policy.MSVCFormatting ? "," : ", "; - if (!SkipBrackets) + if (!IsPack) OS << '<'; bool NeedSpace = false; @@ -2031,42 +2353,43 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args, if (Argument.getKind() == TemplateArgument::Pack) { if (Argument.pack_size() && !FirstArg) OS << Comma; - printTo(ArgOS, Argument.getPackAsArray(), Policy, true, TPL, + printTo(ArgOS, Argument.getPackAsArray(), Policy, TPL, /*IsPack*/ true, ParmIndex); } else { if (!FirstArg) OS << Comma; // Tries to print the argument with location info if exists. - printArgument( - Arg, Policy, ArgOS, - TemplateParameterList::shouldIncludeTypeForArgument(TPL, ParmIndex)); + printArgument(Arg, Policy, ArgOS, + TemplateParameterList::shouldIncludeTypeForArgument( + Policy, TPL, ParmIndex)); } StringRef ArgString = ArgOS.str(); // If this is the first argument and its string representation // begins with the global scope specifier ('::foo'), add a space // to avoid printing the diagraph '<:'. - if (FirstArg && !ArgString.empty() && ArgString[0] == ':') + if (FirstArg && ArgString.starts_with(":")) OS << ' '; OS << ArgString; // If the last character of our string is '>', add another space to // keep the two '>''s separate tokens. 
- NeedSpace = Policy.SplitTemplateClosers && !ArgString.empty() && - ArgString.back() == '>'; - FirstArg = false; + if (!ArgString.empty()) { + NeedSpace = Policy.SplitTemplateClosers && ArgString.back() == '>'; + FirstArg = false; + } // Use same template parameter for all elements of Pack if (!IsPack) ParmIndex++; } - if (NeedSpace) - OS << ' '; - - if (!SkipBrackets) + if (!IsPack) { + if (NeedSpace) + OS << ' '; OS << '>'; + } } void clang::printTemplateArgumentList(raw_ostream &OS, @@ -2080,14 +2403,14 @@ void clang::printTemplateArgumentList(raw_ostream &OS, ArrayRef<TemplateArgument> Args, const PrintingPolicy &Policy, const TemplateParameterList *TPL) { - printTo(OS, Args, Policy, false, TPL, /*isPack*/ false, /*parmIndex*/ 0); + printTo(OS, Args, Policy, TPL, /*isPack*/ false, /*parmIndex*/ 0); } void clang::printTemplateArgumentList(raw_ostream &OS, ArrayRef<TemplateArgumentLoc> Args, const PrintingPolicy &Policy, const TemplateParameterList *TPL) { - printTo(OS, Args, Policy, false, TPL, /*isPack*/ false, /*parmIndex*/ 0); + printTo(OS, Args, Policy, TPL, /*isPack*/ false, /*parmIndex*/ 0); } std::string Qualifiers::getAsString() const { @@ -2157,6 +2480,10 @@ std::string Qualifiers::getAddrSpaceAsString(LangAS AS) { return "__uptr __ptr32"; case LangAS::ptr64: return "__ptr64"; + case LangAS::wasm_funcref: + return "__funcref"; + case LangAS::hlsl_groupshared: + return "groupshared"; default: return std::to_string(toTargetAddressSpace(AS)); } @@ -2272,3 +2599,9 @@ void QualType::getAsStringInternal(const Type *ty, Qualifiers qs, std::string str = std::string(StrOS.str()); buffer.swap(str); } + +raw_ostream &clang::operator<<(raw_ostream &OS, QualType QT) { + SplitQualType S = QT.split(); + TypePrinter(LangOptions()).print(S.Ty, S.Quals, OS, /*PlaceHolder=*/""); + return OS; +} diff --git a/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp index d58e87517785..464a2014c430 100644 --- a/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp +++ b/contrib/llvm-project/clang/lib/AST/VTTBuilder.cpp @@ -189,7 +189,7 @@ void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) { if (!IsPrimaryVTT) { // Remember the sub-VTT index. - SubVTTIndicies[Base] = VTTComponents.size(); + SubVTTIndices[Base] = VTTComponents.size(); } uint64_t VTableIndex = VTTVTables.size(); diff --git a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp index 38d6fc28e098..e941c3bedb0a 100644 --- a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp +++ b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp @@ -17,6 +17,7 @@ #include "clang/AST/RecordLayout.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/SetOperations.h" +#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" @@ -421,7 +422,7 @@ void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base, Out << ", " << Overrider.Offset.getQuantity() << ')'; BaseOffset Offset; - if (!Overrider.Method->isPure()) + if (!Overrider.Method->isPureVirtual()) Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD); if (!Offset.isEmpty()) { @@ -664,13 +665,18 @@ CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const { // vtable address point. (We subtract 3 to account for the information just // above the address point, the RTTI info, the offset to top, and the // vcall offset itself). 
- int64_t OffsetIndex = -(int64_t)(3 + Components.size()); + size_t NumComponentsAboveAddrPoint = 3; + if (Context.getLangOpts().OmitVTableRTTI) + NumComponentsAboveAddrPoint--; + int64_t OffsetIndex = + -(int64_t)(NumComponentsAboveAddrPoint + Components.size()); // Under the relative ABI, the offset widths are 32-bit ints instead of // pointer widths. CharUnits OffsetWidth = Context.toCharUnitsFromBits( - VTables.isRelativeLayout() ? 32 - : Context.getTargetInfo().getPointerWidth(0)); + VTables.isRelativeLayout() + ? 32 + : Context.getTargetInfo().getPointerWidth(LangAS::Default)); CharUnits OffsetOffset = OffsetWidth * OffsetIndex; return OffsetOffset; @@ -1070,7 +1076,7 @@ void ItaniumVTableBuilder::AddThunk(const CXXMethodDecl *MD, SmallVectorImpl<ThunkInfo> &ThunksVector = Thunks[MD]; // Check if we have this thunk already. - if (llvm::find(ThunksVector, Thunk) != ThunksVector.end()) + if (llvm::is_contained(ThunksVector, Thunk)) return; ThunksVector.push_back(Thunk); @@ -1141,11 +1147,41 @@ void ItaniumVTableBuilder::ComputeThisAdjustments() { continue; // Add it. - VTableThunks[VTableIndex].This = ThisAdjustment; + auto SetThisAdjustmentThunk = [&](uint64_t Idx) { + // If a this pointer adjustment is required, record the method that + // created the vtable entry. MD is not necessarily the method that + // created the entry since derived classes overwrite base class + // information in MethodInfoMap, hence findOriginalMethodInMap is called + // here. + // + // For example, in the following class hierarchy, if MD = D1::m and + // Overrider = D2:m, the original method that created the entry is B0:m, + // which is what findOriginalMethodInMap(MD) returns: + // + // struct B0 { int a; virtual void m(); }; + // struct D0 : B0 { int a; void m() override; }; + // struct D1 : B0 { int a; void m() override; }; + // struct D2 : D0, D1 { int a; void m() override; }; + // + // We need to record the method because we cannot + // call findOriginalMethod to find the method that created the entry if + // the method in the entry requires adjustment. + // + // Do not set ThunkInfo::Method if Idx is already in VTableThunks. This + // can happen when covariant return adjustment is required too. + if (!VTableThunks.count(Idx)) { + const CXXMethodDecl *Method = VTables.findOriginalMethodInMap(MD); + VTableThunks[Idx].Method = Method; + VTableThunks[Idx].ThisType = Method->getThisType().getTypePtr(); + } + VTableThunks[Idx].This = ThisAdjustment; + }; + + SetThisAdjustmentThunk(VTableIndex); if (isa<CXXDestructorDecl>(MD)) { // Add an adjustment for the deleting destructor as well. - VTableThunks[VTableIndex + 1].This = ThisAdjustment; + SetThisAdjustmentThunk(VTableIndex + 1); } } @@ -1255,7 +1291,7 @@ ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment( const CXXMethodDecl *MD, CharUnits BaseOffsetInLayoutClass, FinalOverriders::OverriderInfo Overrider) { // Ignore adjustments for pure virtual member functions. - if (Overrider.Method->isPure()) + if (Overrider.Method->isPureVirtual()) return ThisAdjustment(); BaseSubobject OverriddenBaseSubobject(MD->getParent(), @@ -1418,8 +1454,7 @@ FindNearestOverriddenMethod(const CXXMethodDecl *MD, OverriddenMethodsSetTy OverriddenMethods; ComputeAllOverriddenMethods(MD, OverriddenMethods); - for (const CXXRecordDecl *PrimaryBase : - llvm::make_range(Bases.rbegin(), Bases.rend())) { + for (const CXXRecordDecl *PrimaryBase : llvm::reverse(Bases)) { // Now check the overridden methods. 
for (const CXXMethodDecl *OverriddenMD : OverriddenMethods) { // We found our overridden method. @@ -1504,6 +1539,8 @@ void ItaniumVTableBuilder::AddMethods( FindNearestOverriddenMethod(MD, PrimaryBases)) { if (ComputeReturnAdjustmentBaseOffset(Context, MD, OverriddenMD).isEmpty()) { + VTables.setOriginalMethod(MD, OverriddenMD); + // Replace the method info of the overridden method with our own // method. assert(MethodInfoMap.count(OverriddenMD) && @@ -1542,7 +1579,8 @@ void ItaniumVTableBuilder::AddMethods( // This is a virtual thunk for the most derived class, add it. AddThunk(Overrider.Method, - ThunkInfo(ThisAdjustment, ReturnAdjustment)); + ThunkInfo(ThisAdjustment, ReturnAdjustment, + OverriddenMD->getThisType().getTypePtr())); } } @@ -1559,6 +1597,8 @@ void ItaniumVTableBuilder::AddMethods( std::stable_sort( NewImplicitVirtualFunctions.begin(), NewImplicitVirtualFunctions.end(), [](const CXXMethodDecl *A, const CXXMethodDecl *B) { + if (A == B) + return false; if (A->isCopyAssignmentOperator() != B->isCopyAssignmentOperator()) return A->isCopyAssignmentOperator(); if (A->isMoveAssignmentOperator() != B->isMoveAssignmentOperator()) @@ -1600,7 +1640,7 @@ void ItaniumVTableBuilder::AddMethods( // Check if this overrider needs a return adjustment. // We don't want to do this for pure virtual member functions. BaseOffset ReturnAdjustmentOffset; - if (!OverriderMD->isPure()) { + if (!OverriderMD->isPureVirtual()) { ReturnAdjustmentOffset = ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD); } @@ -1608,6 +1648,15 @@ void ItaniumVTableBuilder::AddMethods( ReturnAdjustment ReturnAdjustment = ComputeReturnAdjustment(ReturnAdjustmentOffset); + // If a return adjustment is required, record the method that created the + // vtable entry. We need to record the method because we cannot call + // findOriginalMethod to find the method that created the entry if the + // method in the entry requires adjustment. + if (!ReturnAdjustment.isEmpty()) { + VTableThunks[Components.size()].Method = MD; + VTableThunks[Components.size()].ThisType = MD->getThisType().getTypePtr(); + } + AddMethod(Overrider.Method, ReturnAdjustment); } } @@ -1666,7 +1715,8 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables( Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop)); // Next, add the RTTI. - Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass)); + if (!Context.getLangOpts().OmitVTableRTTI) + Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass)); uint64_t AddressPoint = Components.size(); @@ -1882,11 +1932,31 @@ void ItaniumVTableBuilder::LayoutVTablesForVirtualBases( } } +static void printThunkMethod(const ThunkInfo &Info, raw_ostream &Out) { + if (!Info.Method) + return; + std::string Str = PredefinedExpr::ComputeName( + PredefinedIdentKind::PrettyFunctionNoVirtual, Info.Method); + Out << " method: " << Str; +} + /// dumpLayout - Dump the vtable layout. void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { // FIXME: write more tests that actually use the dumpLayout output to prevent // ItaniumVTableBuilder regressions. 
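The hierarchy referenced in the ComputeThisAdjustments comment above is a convenient way to see the new output. Building a translation unit that emits D2's vtable with clang's cc1 option -fdump-vtable-layouts (via -Xclang) exercises the "Original map" dump and the per-thunk "method:" annotations added here; the exact textual form of that output is whatever dumpLayout above produces, not reproduced in this sketch.

  // D2::m overrides through two distinct B0 subobjects; calls through the
  // D1-in-D2 path need a 'this' adjustment, and the slot was originally
  // created by B0::m, which is what the new bookkeeping records.
  struct B0 { int a; virtual void m(); };
  struct D0 : B0 { int a; void m() override; };
  struct D1 : B0 { int a; void m() override; };
  struct D2 : D0, D1 { int a; void m() override; };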
+ Out << "Original map\n"; + + for (const auto &P : VTables.getOriginalMethodMap()) { + std::string Str0 = + PredefinedExpr::ComputeName(PredefinedIdentKind::PrettyFunctionNoVirtual, + P.first); + std::string Str1 = + PredefinedExpr::ComputeName(PredefinedIdentKind::PrettyFunctionNoVirtual, + P.second); + Out << " " << Str0 << " -> " << Str1 << "\n"; + } + if (isBuildingConstructorVTable()) { Out << "Construction vtable for ('"; MostDerivedClass->printQualifiedName(Out); @@ -1948,11 +2018,10 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { case VTableComponent::CK_FunctionPointer: { const CXXMethodDecl *MD = Component.getFunctionDecl(); - std::string Str = - PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, - MD); + std::string Str = PredefinedExpr::ComputeName( + PredefinedIdentKind::PrettyFunctionNoVirtual, MD); Out << Str; - if (MD->isPure()) + if (MD->isPureVirtual()) Out << " [pure]"; if (MD->isDeleted()) @@ -1971,6 +2040,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { } Out << ']'; + printThunkMethod(Thunk, Out); } // If this function pointer has a 'this' pointer adjustment, dump it. @@ -1984,6 +2054,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { } Out << ']'; + printThunkMethod(Thunk, Out); } } @@ -2003,7 +2074,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { else Out << "() [deleting]"; - if (DD->isPure()) + if (DD->isPureVirtual()) Out << " [pure]"; ThunkInfo Thunk = VTableThunks.lookup(I); @@ -2020,6 +2091,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { Out << ']'; } + printThunkMethod(Thunk, Out); } break; @@ -2028,11 +2100,10 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { case VTableComponent::CK_UnusedFunctionPointer: { const CXXMethodDecl *MD = Component.getUnusedFunctionDecl(); - std::string Str = - PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, - MD); + std::string Str = PredefinedExpr::ComputeName( + PredefinedIdentKind::PrettyFunctionNoVirtual, MD); Out << "[unused] " << Str; - if (MD->isPure()) + if (MD->isPureVirtual()) Out << " [pure]"; } @@ -2107,9 +2178,8 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { for (const auto &I : Thunks) { const CXXMethodDecl *MD = I.first; - std::string MethodName = - PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, - MD); + std::string MethodName = PredefinedExpr::ComputeName( + PredefinedIdentKind::PrettyFunctionNoVirtual, MD); MethodNamesAndDecls.insert(std::make_pair(MethodName, MD)); } @@ -2120,7 +2190,6 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { ThunkInfoVectorTy ThunksVector = Thunks[MD]; llvm::sort(ThunksVector, [](const ThunkInfo &LHS, const ThunkInfo &RHS) { - assert(LHS.Method == nullptr && RHS.Method == nullptr); return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return); }); @@ -2173,9 +2242,8 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { continue; MD = MD->getCanonicalDecl(); - std::string MethodName = - PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, - MD); + std::string MethodName = PredefinedExpr::ComputeName( + PredefinedIdentKind::PrettyFunctionNoVirtual, MD); if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { GlobalDecl GD(DD, Dtor_Complete); @@ -2258,7 +2326,7 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices, VTableLayout::~VTableLayout() { } bool VTableContextBase::hasVtableSlot(const CXXMethodDecl *MD) { - return MD->isVirtual() && !MD->isConsteval(); + return 
MD->isVirtual() && !MD->isImmediateFunction(); } ItaniumVTableContext::ItaniumVTableContext( @@ -2310,6 +2378,35 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD, return I->second; } +GlobalDecl ItaniumVTableContext::findOriginalMethod(GlobalDecl GD) { + const auto *MD = cast<CXXMethodDecl>(GD.getDecl()); + computeVTableRelatedInformation(MD->getParent()); + const CXXMethodDecl *OriginalMD = findOriginalMethodInMap(MD); + + if (const auto *DD = dyn_cast<CXXDestructorDecl>(OriginalMD)) + return GlobalDecl(DD, GD.getDtorType()); + return OriginalMD; +} + +const CXXMethodDecl * +ItaniumVTableContext::findOriginalMethodInMap(const CXXMethodDecl *MD) const { + // Traverse the chain of virtual methods until we find the method that added + // the v-table slot. + while (true) { + auto I = OriginalMethodMap.find(MD); + + // MD doesn't exist in OriginalMethodMap, so it must be the method we are + // looking for. + if (I == OriginalMethodMap.end()) + break; + + // Set MD to the overridden method. + MD = I->second; + } + + return MD; +} + static std::unique_ptr<VTableLayout> CreateVTableLayout(const ItaniumVTableBuilder &Builder) { SmallVector<VTableLayout::VTableThunkTy, 1> @@ -2329,7 +2426,7 @@ ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) { return; ItaniumVTableBuilder Builder(*this, RD, CharUnits::Zero(), - /*MostDerivedClassIsVirtual=*/0, RD); + /*MostDerivedClassIsVirtual=*/false, RD); Entry = CreateVTableLayout(Builder); MethodVTableIndices.insert(Builder.vtable_indices_begin(), @@ -2498,7 +2595,7 @@ private: SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD]; // Check if we have this thunk already. - if (llvm::find(ThunksVector, Thunk) != ThunksVector.end()) + if (llvm::is_contained(ThunksVector, Thunk)) return; ThunksVector.push_back(Thunk); @@ -3072,7 +3169,7 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth, // We don't want to do this for pure virtual member functions. BaseOffset ReturnAdjustmentOffset; ReturnAdjustment ReturnAdjustment; - if (!FinalOverriderMD->isPure()) { + if (!FinalOverriderMD->isPureVirtual()) { ReturnAdjustmentOffset = ComputeReturnAdjustmentBaseOffset(Context, FinalOverriderMD, MD); } @@ -3090,16 +3187,15 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth, ReturnAdjustmentOffset.VirtualBase); } } - + auto ThisType = (OverriddenMD ? OverriddenMD : MD)->getThisType().getTypePtr(); AddMethod(FinalOverriderMD, - ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment, + ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment, ThisType, ForceReturnAdjustmentMangling ? 
MD : nullptr)); } } static void PrintBasePath(const VPtrInfo::BasePath &Path, raw_ostream &Out) { - for (const CXXRecordDecl *Elem : - llvm::make_range(Path.rbegin(), Path.rend())) { + for (const CXXRecordDecl *Elem : llvm::reverse(Path)) { Out << "'"; Elem->printQualifiedName(Out); Out << "' in "; @@ -3115,8 +3211,7 @@ static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out, if (!ContinueFirstLine) Out << LinePrefix; Out << "[return adjustment (to type '" - << TI.Method->getReturnType().getCanonicalType().getAsString() - << "'): "; + << TI.Method->getReturnType().getCanonicalType() << "'): "; if (R.Virtual.Microsoft.VBPtrOffset) Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", "; if (R.Virtual.Microsoft.VBIndex) @@ -3171,9 +3266,9 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) { // FIXME: Figure out how to print the real thunk type, since they can // differ in the return type. std::string Str = PredefinedExpr::ComputeName( - PredefinedExpr::PrettyFunctionNoVirtual, MD); + PredefinedIdentKind::PrettyFunctionNoVirtual, MD); Out << Str; - if (MD->isPure()) + if (MD->isPureVirtual()) Out << " [pure]"; if (MD->isDeleted()) @@ -3192,7 +3287,7 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) { DD->printQualifiedName(Out); Out << "() [scalar deleting]"; - if (DD->isPure()) + if (DD->isPureVirtual()) Out << " [pure]"; ThunkInfo Thunk = VTableThunks.lookup(I); @@ -3226,7 +3321,7 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) { for (const auto &I : Thunks) { const CXXMethodDecl *MD = I.first; std::string MethodName = PredefinedExpr::ComputeName( - PredefinedExpr::PrettyFunctionNoVirtual, MD); + PredefinedIdentKind::PrettyFunctionNoVirtual, MD); MethodNamesAndDecls.insert(std::make_pair(MethodName, MD)); } @@ -3386,10 +3481,8 @@ static bool rebucketPaths(VPtrInfoVector &Paths) { // sorted vector to implement a multiset to form the buckets. Note that the // ordering is based on pointers, but it doesn't change our output order. The // current algorithm is designed to match MSVC 2012's names. - llvm::SmallVector<std::reference_wrapper<VPtrInfo>, 2> PathsSorted; - PathsSorted.reserve(Paths.size()); - for (auto& P : Paths) - PathsSorted.push_back(*P); + llvm::SmallVector<std::reference_wrapper<VPtrInfo>, 2> PathsSorted( + llvm::make_pointee_range(Paths)); llvm::sort(PathsSorted, [](const VPtrInfo &LHS, const VPtrInfo &RHS) { return LHS.MangledPath < RHS.MangledPath; }); @@ -3454,7 +3547,7 @@ static void removeRedundantPaths(std::list<FullPathTy> &FullPaths) { if (&SpecificPath == &OtherPath) continue; if (llvm::all_of(SpecificPath, [&](const BaseSubobject &BSO) { - return OtherPath.count(BSO) != 0; + return OtherPath.contains(BSO); })) { return true; } @@ -3660,7 +3753,7 @@ void MicrosoftVTableContext::dumpMethodLocations( assert(hasVtableSlot(MD)); std::string MethodName = PredefinedExpr::ComputeName( - PredefinedExpr::PrettyFunctionNoVirtual, MD); + PredefinedIdentKind::PrettyFunctionNoVirtual, MD); if (isa<CXXDestructorDecl>(MD)) { IndicesMap[I.second] = MethodName + " [scalar deleting]"; |
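One behavioral tweak earlier in VTableBuilder.cpp is the hasVtableSlot change from isConsteval to isImmediateFunction. A small C++20 illustration (my own, not from the patch) of why such functions never occupy a slot:

  // A consteval virtual is an immediate function: every call to it is resolved
  // at compile time, so no vtable entry is needed or emitted for it.
  struct Shape {
    virtual consteval int tag() const { return 1; }  // no vtable slot
    virtual int sides() const { return 0; }          // ordinary virtual, gets a slot
  };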